/*
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 */
#include <sys/resource.h>

#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"
#include "../verify.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"

enum uring_cmd_type {
        FIO_URING_CMD_NVME = 1,
};

struct io_sq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        unsigned *flags;
        unsigned *array;
};

struct io_cq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        struct io_uring_cqe *cqes;
};

struct ioring_mmap {
        void *ptr;
        size_t len;
};

struct ioring_data {
        int ring_fd;

        struct io_u **io_u_index;
        char *md_buf;

        int *fds;

        struct io_sq_ring sq_ring;
        struct io_uring_sqe *sqes;
        struct iovec *iovecs;
        unsigned sq_ring_mask;

        struct io_cq_ring cq_ring;
        unsigned cq_ring_mask;

        int queued;
        int cq_ring_off;
        unsigned iodepth;
        int prepped;

        struct ioring_mmap mmap[3];

        struct cmdprio cmdprio;

        struct nvme_dsm *dsm;
        uint32_t cdw12_flags[DDIR_RWDIR_CNT];
};

struct ioring_options {
        struct thread_data *td;
        unsigned int hipri;
        unsigned int readfua;
        unsigned int writefua;
        struct cmdprio_options cmdprio_options;
        unsigned int fixedbufs;
        unsigned int registerfiles;
        unsigned int sqpoll_thread;
        unsigned int sqpoll_set;
        unsigned int sqpoll_cpu;
        unsigned int nonvectored;
        unsigned int uncached;
        unsigned int nowait;
        unsigned int force_async;
        unsigned int md_per_io_size;
        unsigned int pi_act;
        unsigned int apptag;
        unsigned int apptag_mask;
        unsigned int prchk;
        char *pi_chk;
        enum uring_cmd_type cmd_type;
};

static const int ddir_to_op[2][2] = {
        { IORING_OP_READV, IORING_OP_READ },
        { IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
        IORING_OP_READ_FIXED,
        IORING_OP_WRITE_FIXED
};

static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
        struct ioring_options *o = data;

        o->sqpoll_cpu = *val;
        o->sqpoll_set = 1;
        return 0;
}

static struct fio_option options[] = {
        {
                .name = "hipri",
                .lname = "High Priority",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, hipri),
                .help = "Use polled IO completions",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "readfua",
                .lname = "Read fua flag support",
                .type = FIO_OPT_BOOL,
                .off1 = offsetof(struct ioring_options, readfua),
                .help = "Set FUA flag (force unit access) for all Read operations",
                .def = "0",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "writefua",
                .lname = "Write fua flag support",
                .type = FIO_OPT_BOOL,
                .off1 = offsetof(struct ioring_options, writefua),
                .help = "Set FUA flag (force unit access) for all Write operations",
                .def = "0",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "fixedbufs",
                .lname = "Fixed (pre-mapped) IO buffers",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, fixedbufs),
                .help = "Pre map IO buffers",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "registerfiles",
                .lname = "Register file set",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, registerfiles),
                .help = "Pre-open/register files",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "sqthread_poll",
                .lname = "Kernel SQ thread polling",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, sqpoll_thread),
                .help = "Offload submission/completion to kernel thread",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "sqthread_poll_cpu",
                .lname = "SQ Thread Poll CPU",
                .type = FIO_OPT_INT,
                .cb = fio_ioring_sqpoll_cb,
                .help = "What CPU to run SQ thread polling on",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "nonvectored",
                .lname = "Non-vectored",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, nonvectored),
                .def = "-1",
                .help = "Use non-vectored read/write commands",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "uncached",
                .lname = "Uncached",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, uncached),
                .help = "Use RWF_UNCACHED for buffered read/writes",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "nowait",
                .lname = "RWF_NOWAIT",
                .type = FIO_OPT_BOOL,
                .off1 = offsetof(struct ioring_options, nowait),
                .help = "Use RWF_NOWAIT for reads/writes",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "force_async",
                .lname = "Force async",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, force_async),
                .help = "Set IOSQE_ASYNC every N requests",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "cmd_type",
                .lname = "Uring cmd type",
                .type = FIO_OPT_STR,
                .off1 = offsetof(struct ioring_options, cmd_type),
                .help = "Specify uring-cmd type",
                .def = "nvme",
                .posval = {
                          { .ival = "nvme",
                            .oval = FIO_URING_CMD_NVME,
                            .help = "Issue nvme-uring-cmd",
                          },
                },
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        CMDPRIO_OPTIONS(struct ioring_options, FIO_OPT_G_IOURING),
        {
                .name = "md_per_io_size",
                .lname = "Separate Metadata Buffer Size per I/O",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, md_per_io_size),
                .def = "0",
                .help = "Size of separate metadata buffer per I/O (Default: 0)",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "pi_act",
                .lname = "Protection Information Action",
                .type = FIO_OPT_BOOL,
                .off1 = offsetof(struct ioring_options, pi_act),
                .def = "1",
                .help = "Protection Information Action bit (pi_act=1 or pi_act=0)",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "pi_chk",
                .lname = "Protection Information Check",
                .type = FIO_OPT_STR_STORE,
                .off1 = offsetof(struct ioring_options, pi_chk),
                .help = "Control of Protection Information Checking (pi_chk=GUARD,REFTAG,APPTAG)",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "apptag",
                .lname = "Application Tag used in Protection Information",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, apptag),
                .def = "0x1234",
                .help = "Application Tag used in Protection Information field (Default: 0x1234)",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "apptag_mask",
                .lname = "Application Tag Mask",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, apptag_mask),
                .def = "0xffff",
                .help = "Application Tag Mask used with Application Tag (Default: 0xffff)",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = NULL,
        },
};

static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
                          unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
        return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
                             min_complete, flags, NULL, 0);
#else
        return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
                       min_complete, flags, NULL, 0);
#endif
}
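
/*
 * A hedged usage sketch (illustrative, not part of the engine): a single
 * io_uring_enter() call can both submit new SQEs and wait for completions,
 * depending on the arguments and flags. The counts below are made up.
 *
 *      // submit 8 queued SQEs and block until at least one CQE is ready
 *      ret = io_uring_enter(ld, 8, 1, IORING_ENTER_GETEVENTS);
 *
 *      // kick a sleeping SQPOLL thread without waiting for completions
 *      ret = io_uring_enter(ld, ld->queued, 0, IORING_ENTER_SQ_WAKEUP);
 */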

static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct fio_file *f = io_u->file;
        struct io_uring_sqe *sqe;

        sqe = &ld->sqes[io_u->index];

        if (o->registerfiles) {
                sqe->fd = f->engine_pos;
                sqe->flags = IOSQE_FIXED_FILE;
        } else {
                sqe->fd = f->fd;
                sqe->flags = 0;
        }

        if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
                if (o->fixedbufs) {
                        sqe->opcode = fixed_ddir_to_op[io_u->ddir];
                        sqe->addr = (unsigned long) io_u->xfer_buf;
                        sqe->len = io_u->xfer_buflen;
                        sqe->buf_index = io_u->index;
                } else {
                        struct iovec *iov = &ld->iovecs[io_u->index];

                        /*
                         * Update based on actual io_u, requeue could have
                         * changed it.
                         */
                        iov->iov_base = io_u->xfer_buf;
                        iov->iov_len = io_u->xfer_buflen;

                        sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
                        if (o->nonvectored) {
                                sqe->addr = (unsigned long) iov->iov_base;
                                sqe->len = iov->iov_len;
                        } else {
                                sqe->addr = (unsigned long) iov;
                                sqe->len = 1;
                        }
                }
                sqe->rw_flags = 0;
                if (!td->o.odirect && o->uncached)
                        sqe->rw_flags |= RWF_UNCACHED;
                if (o->nowait)
                        sqe->rw_flags |= RWF_NOWAIT;

                /*
                 * Since io_uring can have a submission context (sqthread_poll)
                 * that is different from the process context, we cannot rely on
                 * the IO priority set by ioprio_set() (options prio, prioclass,
                 * and priohint) to be inherited.
                 * td->ioprio will have the value of the "default prio", so set
                 * this unconditionally. This value might get overridden by
                 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
                 * cmdprio_bssplit is used.
                 */
                sqe->ioprio = td->ioprio;
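
                /*
                 * Illustrative aside: an ioprio value packs the scheduling
                 * class into the top 3 bits of a 16-bit value, matching the
                 * kernel's IOPRIO_PRIO_VALUE() encoding, e.g.
                 *
                 *      sqe->ioprio = (IOPRIO_CLASS_BE << 13) | 4;
                 *
                 * fio computes td->ioprio from the prio/prioclass/priohint
                 * options, so the engine never assembles the value by hand.
                 */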
                sqe->off = io_u->offset;
        } else if (ddir_sync(io_u->ddir)) {
                if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
                        sqe->off = f->first_write;
                        sqe->len = f->last_write - f->first_write;
                        sqe->sync_range_flags = td->o.sync_file_range;
                        sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
                } else {
                        if (io_u->ddir == DDIR_DATASYNC)
                                sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
                        sqe->opcode = IORING_OP_FSYNC;
                }
        }

        if (o->force_async && ++ld->prepped == o->force_async) {
                ld->prepped = 0;
                sqe->flags |= IOSQE_ASYNC;
        }

        sqe->user_data = (unsigned long) io_u;
        return 0;
}

static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct fio_file *f = io_u->file;
        struct nvme_uring_cmd *cmd;
        struct io_uring_sqe *sqe;
        struct nvme_dsm *dsm;
        void *ptr = ld->dsm;
        unsigned int dsm_size;

        /* only supports nvme_uring_cmd */
        if (o->cmd_type != FIO_URING_CMD_NVME)
                return -EINVAL;

        if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM)
                return 0;

        sqe = &ld->sqes[(io_u->index) << 1];

        if (o->registerfiles) {
                sqe->fd = f->engine_pos;
                sqe->flags = IOSQE_FIXED_FILE;
        } else {
                sqe->fd = f->fd;
        }
        if (!td->o.odirect && o->uncached)
                sqe->rw_flags |= RWF_UNCACHED;
        if (o->nowait)
                sqe->rw_flags |= RWF_NOWAIT;

        sqe->opcode = IORING_OP_URING_CMD;
        sqe->user_data = (unsigned long) io_u;
        if (o->nonvectored)
                sqe->cmd_op = NVME_URING_CMD_IO;
        else
                sqe->cmd_op = NVME_URING_CMD_IO_VEC;
        if (o->force_async && ++ld->prepped == o->force_async) {
                ld->prepped = 0;
                sqe->flags |= IOSQE_ASYNC;
        }
        if (o->fixedbufs) {
                sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
                sqe->buf_index = io_u->index;
        }

        cmd = (struct nvme_uring_cmd *)sqe->cmd;
        dsm_size = sizeof(*ld->dsm) + td->o.num_range * sizeof(struct nvme_dsm_range);
        ptr += io_u->index * dsm_size;
        dsm = (struct nvme_dsm *)ptr;

        return fio_nvme_uring_cmd_prep(cmd, io_u,
                        o->nonvectored ? NULL : &ld->iovecs[io_u->index],
                        dsm, ld->cdw12_flags[io_u->ddir]);
}
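
/*
 * A note on the (io_u->index) << 1 indexing in fio_ioring_cmd_prep(): with
 * IORING_SETUP_SQE128 (set for NVMe in fio_ioring_cmd_queue_init()), each
 * submission slot is two struct io_uring_sqe wide and the passthrough
 * command lives in the big SQE's trailing area. A hedged sketch:
 *
 *      struct io_uring_sqe *sqe = &ld->sqes[idx << 1];  // 128-byte slot
 *      struct nvme_uring_cmd *cmd = (void *)sqe->cmd;   // command payload
 */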

static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
        struct ioring_data *ld = td->io_ops_data;
        struct io_uring_cqe *cqe;
        struct io_u *io_u;
        unsigned index;

        index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

        cqe = &ld->cq_ring.cqes[index];
        io_u = (struct io_u *) (uintptr_t) cqe->user_data;

        if (cqe->res != io_u->xfer_buflen) {
                if (cqe->res > io_u->xfer_buflen)
                        io_u->error = -cqe->res;
                else
                        io_u->resid = io_u->xfer_buflen - cqe->res;
        } else
                io_u->error = 0;

        return io_u;
}

static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_uring_cqe *cqe;
        struct io_u *io_u;
        struct nvme_data *data;
        unsigned index;
        int ret;

        index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
        if (o->cmd_type == FIO_URING_CMD_NVME)
                index <<= 1;

        cqe = &ld->cq_ring.cqes[index];
        io_u = (struct io_u *) (uintptr_t) cqe->user_data;

        if (cqe->res != 0) {
                io_u->error = -cqe->res;
                return io_u;
        } else {
                io_u->error = 0;
        }

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                data = FILE_ENG_DATA(io_u->file);
                if (data->pi_type && (io_u->ddir == DDIR_READ) && !o->pi_act) {
                        ret = fio_nvme_pi_verify(data, io_u);
                        if (ret)
                                io_u->error = ret;
                }
        }

        return io_u;
}

static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
                                  unsigned int max)
{
        struct ioring_data *ld = td->io_ops_data;
        struct io_cq_ring *ring = &ld->cq_ring;
        unsigned head, reaped = 0;

        head = *ring->head;
        do {
                if (head == atomic_load_acquire(ring->tail))
                        break;
                reaped++;
                head++;
        } while (reaped + events < max);

        if (reaped)
                atomic_store_release(ring->head, head);

        return reaped;
}
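
/*
 * Memory-ordering sketch for the reap loop above (illustrative): the kernel
 * is the CQ producer and fio the single consumer. The acquire load of the
 * tail pairs with the kernel's release store, so CQE contents are visible
 * before the tail that published them; the release store of the head then
 * publishes consumption back to the kernel:
 *
 *      while (head != atomic_load_acquire(ring->tail)) {
 *              process(&ring->cqes[head & mask]);      // hypothetical helper
 *              head++;
 *      }
 *      atomic_store_release(ring->head, head);
 */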

static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
                                unsigned int max, const struct timespec *t)
{
        struct ioring_data *ld = td->io_ops_data;
        unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
        struct ioring_options *o = td->eo;
        struct io_cq_ring *ring = &ld->cq_ring;
        unsigned events = 0;
        int r;

        ld->cq_ring_off = *ring->head;
        do {
                r = fio_ioring_cqring_reap(td, events, max);
                if (r) {
                        events += r;
                        continue;
                }

                if (!o->sqpoll_thread) {
                        r = io_uring_enter(ld, 0, actual_min,
                                           IORING_ENTER_GETEVENTS);
                        if (r < 0) {
                                if (errno == EAGAIN || errno == EINTR)
                                        continue;
                                r = -errno;
                                td_verror(td, errno, "io_uring_enter");
                                break;
                        }
                }
        } while (events < min);

        return r < 0 ? r : events;
}

static inline void fio_ioring_cmd_nvme_pi(struct thread_data *td,
                                          struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct nvme_uring_cmd *cmd;
        struct io_uring_sqe *sqe;
        struct nvme_cmd_ext_io_opts ext_opts = {0};
        struct nvme_data *data = FILE_ENG_DATA(io_u->file);

        if (io_u->ddir == DDIR_TRIM)
                return;

        sqe = &ld->sqes[(io_u->index) << 1];
        cmd = (struct nvme_uring_cmd *)sqe->cmd;

        if (data->pi_type) {
                if (o->pi_act)
                        ext_opts.io_flags |= NVME_IO_PRINFO_PRACT;
                ext_opts.io_flags |= o->prchk;
                ext_opts.apptag = o->apptag;
                ext_opts.apptag_mask = o->apptag_mask;
        }

        fio_nvme_pi_fill(cmd, io_u, &ext_opts);
}

static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
                                           struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct cmdprio *cmdprio = &ld->cmdprio;

        if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
                ld->sqes[io_u->index].ioprio = io_u->ioprio;
}

static enum fio_q_status fio_ioring_queue(struct thread_data *td,
                                          struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_sq_ring *ring = &ld->sq_ring;
        unsigned tail, next_tail;

        fio_ro_check(td, io_u);

        if (ld->queued == ld->iodepth)
                return FIO_Q_BUSY;

        if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM) {
                if (ld->queued)
                        return FIO_Q_BUSY;

                do_io_u_trim(td, io_u);

                io_u_mark_submit(td, 1);
                io_u_mark_complete(td, 1);
                return FIO_Q_COMPLETED;
        }

        tail = *ring->tail;
        next_tail = tail + 1;
        if (next_tail == atomic_load_relaxed(ring->head))
                return FIO_Q_BUSY;

        if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
                fio_ioring_cmdprio_prep(td, io_u);

        if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
            o->cmd_type == FIO_URING_CMD_NVME)
                fio_ioring_cmd_nvme_pi(td, io_u);

        ring->array[tail & ld->sq_ring_mask] = io_u->index;
        atomic_store_release(ring->tail, next_tail);

        ld->queued++;
        return FIO_Q_QUEUED;
}
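
/*
 * The mirror-image producer side, as a hedged sketch of what the tail end of
 * fio_ioring_queue() does: write the SQE index into the SQ array slot first,
 * then publish it with a release store of the tail so the kernel's acquire
 * load observes a fully written entry:
 *
 *      ring->array[tail & ld->sq_ring_mask] = io_u->index;
 *      atomic_store_release(ring->tail, tail + 1);
 */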

static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
        struct ioring_data *ld = td->io_ops_data;
        struct timeval now;

        if (!fio_fill_issue_time(td))
                return;

        fio_gettime(&now, NULL);

        while (nr--) {
                struct io_sq_ring *ring = &ld->sq_ring;
                int index = ring->array[start & ld->sq_ring_mask];
                struct io_u *io_u = ld->io_u_index[index];

                memcpy(&io_u->issue_time, &now, sizeof(now));
                io_u_queued(td, io_u);

                start++;
        }

        /*
         * only used for iolog
         */
        if (td->o.read_iolog_file)
                memcpy(&td->last_issue, &now, sizeof(now));
}

static int fio_ioring_commit(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int ret;

        if (!ld->queued)
                return 0;

        /*
         * Kernel side does submission. Just need to check if the ring is
         * flagged as needing a kick; if so, call io_uring_enter(). This
         * only happens if we've been idle too long.
         */
        if (o->sqpoll_thread) {
                struct io_sq_ring *ring = &ld->sq_ring;
                unsigned start = *ld->sq_ring.tail - ld->queued;
                unsigned flags;

                flags = atomic_load_relaxed(ring->flags);
                if (flags & IORING_SQ_NEED_WAKEUP)
                        io_uring_enter(ld, ld->queued, 0,
                                       IORING_ENTER_SQ_WAKEUP);
                fio_ioring_queued(td, start, ld->queued);
                io_u_mark_submit(td, ld->queued);

                ld->queued = 0;
                return 0;
        }

        do {
                unsigned start = *ld->sq_ring.head;
                long nr = ld->queued;

                ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
                if (ret > 0) {
                        fio_ioring_queued(td, start, ret);
                        io_u_mark_submit(td, ret);

                        ld->queued -= ret;
                        ret = 0;
                } else if (!ret) {
                        io_u_mark_submit(td, ret);
                        continue;
                } else {
                        if (errno == EAGAIN || errno == EINTR) {
                                ret = fio_ioring_cqring_reap(td, 0, ld->queued);
                                if (ret)
                                        continue;
                                /* Shouldn't happen */
                                ret = -EAGAIN;
                                break;
                        }
                        ret = -errno;
                        td_verror(td, errno, "io_uring_enter submit");
                        break;
                }
        } while (ld->queued);

        return ret;
}

static void fio_ioring_unmap(struct ioring_data *ld)
{
        int i;

        for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
                munmap(ld->mmap[i].ptr, ld->mmap[i].len);
        close(ld->ring_fd);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;

        if (ld) {
                if (!(td->flags & TD_F_CHILD))
                        fio_ioring_unmap(ld);

                fio_cmdprio_cleanup(&ld->cmdprio);
                free(ld->io_u_index);
                free(ld->md_buf);
                free(ld->iovecs);
                free(ld->fds);
                free(ld->dsm);
                free(ld);
        }
}

static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
        struct io_sq_ring *sring = &ld->sq_ring;
        struct io_cq_ring *cring = &ld->cq_ring;
        void *ptr;

        ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
        ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
                   IORING_OFF_SQ_RING);
        ld->mmap[0].ptr = ptr;
        sring->head = ptr + p->sq_off.head;
        sring->tail = ptr + p->sq_off.tail;
        sring->ring_mask = ptr + p->sq_off.ring_mask;
        sring->ring_entries = ptr + p->sq_off.ring_entries;
        sring->flags = ptr + p->sq_off.flags;
        sring->array = ptr + p->sq_off.array;
        ld->sq_ring_mask = *sring->ring_mask;

        if (p->flags & IORING_SETUP_SQE128)
                ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
        else
                ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
        ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_POPULATE, ld->ring_fd,
                        IORING_OFF_SQES);
        ld->mmap[1].ptr = ld->sqes;

        if (p->flags & IORING_SETUP_CQE32) {
                ld->mmap[2].len = p->cq_off.cqes +
                        2 * p->cq_entries * sizeof(struct io_uring_cqe);
        } else {
                ld->mmap[2].len = p->cq_off.cqes +
                        p->cq_entries * sizeof(struct io_uring_cqe);
        }
        ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
                   IORING_OFF_CQ_RING);
        ld->mmap[2].ptr = ptr;
        cring->head = ptr + p->cq_off.head;
        cring->tail = ptr + p->cq_off.tail;
        cring->ring_mask = ptr + p->cq_off.ring_mask;
        cring->ring_entries = ptr + p->cq_off.ring_entries;
        cring->cqes = ptr + p->cq_off.cqes;
        ld->cq_ring_mask = *cring->ring_mask;

        return 0;
}
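
/*
 * For orientation, the three regions mapped above correspond to the fixed
 * mmap offsets of an io_uring fd defined by the UAPI:
 *
 *      mmap[0]: IORING_OFF_SQ_RING (0x0)        SQ head/tail/flags + array
 *      mmap[1]: IORING_OFF_SQES    (0x10000000) the SQE array itself
 *      mmap[2]: IORING_OFF_CQ_RING (0x8000000)  CQ head/tail + CQEs
 */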

static void fio_ioring_probe(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_uring_probe *p;
        int ret;

        /* already set by user, don't touch */
        if (o->nonvectored != -1)
                return;

        /* default to off, as that's always safe */
        o->nonvectored = 0;

        p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
        if (!p)
                return;

        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                      IORING_REGISTER_PROBE, p, 256);
        if (ret < 0)
                goto out;

        if (IORING_OP_WRITE > p->ops_len)
                goto out;

        if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
            (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
                o->nonvectored = 1;
out:
        free(p);
}

static int fio_ioring_queue_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int depth = td->o.iodepth;
        struct io_uring_params p;
        int ret;

        memset(&p, 0, sizeof(p));

        if (o->hipri)
                p.flags |= IORING_SETUP_IOPOLL;
        if (o->sqpoll_thread) {
                p.flags |= IORING_SETUP_SQPOLL;
                if (o->sqpoll_set) {
                        p.flags |= IORING_SETUP_SQ_AFF;
                        p.sq_thread_cpu = o->sqpoll_cpu;
                }

                /*
                 * Submission latency for sqpoll_thread is just the time it
                 * takes to fill in the SQ ring entries, plus any syscall made
                 * when IORING_SQ_NEED_WAKEUP is set; there is no need to log
                 * that time separately.
                 */
                td->o.disable_slat = 1;
        }

        /*
         * Clamp the CQ ring size to our SQ ring size; we don't need more
         * entries than that.
         */
        p.flags |= IORING_SETUP_CQSIZE;
        p.cq_entries = depth;

        /*
         * Set up COOP_TASKRUN, as we don't need to get IPI interrupted for
         * completing IO operations.
         */
        p.flags |= IORING_SETUP_COOP_TASKRUN;

        /*
         * io_uring is always a single issuer, and we can defer task_work
         * runs until we reap events.
         */
        p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (ret < 0) {
                if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
                        p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
                        p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
                        goto retry;
                }
                if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
                        p.flags &= ~IORING_SETUP_COOP_TASKRUN;
                        goto retry;
                }
                if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
                        p.flags &= ~IORING_SETUP_CQSIZE;
                        goto retry;
                }
                return ret;
        }

        ld->ring_fd = ret;

        fio_ioring_probe(td);

        if (o->fixedbufs) {
                ret = syscall(__NR_io_uring_register, ld->ring_fd,
                              IORING_REGISTER_BUFFERS, ld->iovecs, depth);
                if (ret < 0)
                        return ret;
        }

        return fio_ioring_mmap(ld, &p);
}
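
/*
 * The EINVAL fallbacks above strip optional setup flags for older kernels.
 * Rough introduction points, from memory and worth double-checking:
 * IORING_SETUP_CQSIZE ~5.5, IORING_SETUP_COOP_TASKRUN ~5.19,
 * IORING_SETUP_SINGLE_ISSUER ~6.0, IORING_SETUP_DEFER_TASKRUN ~6.1.
 */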

static int fio_ioring_cmd_queue_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int depth = td->o.iodepth;
        struct io_uring_params p;
        int ret;

        memset(&p, 0, sizeof(p));

        if (o->hipri)
                p.flags |= IORING_SETUP_IOPOLL;
        if (o->sqpoll_thread) {
                p.flags |= IORING_SETUP_SQPOLL;
                if (o->sqpoll_set) {
                        p.flags |= IORING_SETUP_SQ_AFF;
                        p.sq_thread_cpu = o->sqpoll_cpu;
                }

                /*
                 * Submission latency for sqpoll_thread is just the time it
                 * takes to fill in the SQ ring entries, plus any syscall made
                 * when IORING_SQ_NEED_WAKEUP is set; there is no need to log
                 * that time separately.
                 */
                td->o.disable_slat = 1;
        }
        if (o->cmd_type == FIO_URING_CMD_NVME) {
                p.flags |= IORING_SETUP_SQE128;
                p.flags |= IORING_SETUP_CQE32;
        }

        /*
         * Clamp the CQ ring size to our SQ ring size; we don't need more
         * entries than that.
         */
        p.flags |= IORING_SETUP_CQSIZE;
        p.cq_entries = depth;

        /*
         * Set up COOP_TASKRUN, as we don't need to get IPI interrupted for
         * completing IO operations.
         */
        p.flags |= IORING_SETUP_COOP_TASKRUN;

        /*
         * io_uring is always a single issuer, and we can defer task_work
         * runs until we reap events.
         */
        p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (ret < 0) {
                if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
                        p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
                        p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
                        goto retry;
                }
                if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
                        p.flags &= ~IORING_SETUP_COOP_TASKRUN;
                        goto retry;
                }
                if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
                        p.flags &= ~IORING_SETUP_CQSIZE;
                        goto retry;
                }
                return ret;
        }

        ld->ring_fd = ret;

        fio_ioring_probe(td);

        if (o->fixedbufs) {
                ret = syscall(__NR_io_uring_register, ld->ring_fd,
                              IORING_REGISTER_BUFFERS, ld->iovecs, depth);
                if (ret < 0)
                        return ret;
        }

        return fio_ioring_mmap(ld, &p);
}

static int fio_ioring_register_files(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct fio_file *f;
        unsigned int i;
        int ret;

        ld->fds = calloc(td->o.nr_files, sizeof(int));

        for_each_file(td, f, i) {
                ret = generic_open_file(td, f);
                if (ret)
                        goto err;
                ld->fds[i] = f->fd;
                f->engine_pos = i;
        }

        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                      IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
        if (ret) {
err:
                free(ld->fds);
                ld->fds = NULL;
        }

        /*
         * Pretend the file is closed again, and really close it if we hit
         * an error.
         */
        for_each_file(td, f, i) {
                if (ret) {
                        int fio_unused ret2;
                        ret2 = generic_close_file(td, f);
                } else
                        fio_file_clear_open(f);
        }

        return ret;
}

static int fio_ioring_post_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_u *io_u;
        int err, i;

        for (i = 0; i < td->o.iodepth; i++) {
                struct iovec *iov = &ld->iovecs[i];

                io_u = ld->io_u_index[i];
                iov->iov_base = io_u->buf;
                iov->iov_len = td_max_bs(td);
        }

        err = fio_ioring_queue_init(td);
        if (err) {
                int init_err = errno;

                if (init_err == ENOSYS)
                        log_err("fio: your kernel doesn't support io_uring\n");
                td_verror(td, init_err, "io_queue_init");
                return 1;
        }

        for (i = 0; i < td->o.iodepth; i++) {
                struct io_uring_sqe *sqe;

                sqe = &ld->sqes[i];
                memset(sqe, 0, sizeof(*sqe));
        }

        if (o->registerfiles) {
                err = fio_ioring_register_files(td);
                if (err) {
                        td_verror(td, errno, "ioring_register_files");
                        return 1;
                }
        }

        return 0;
}

static int fio_ioring_cmd_post_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_u *io_u;
        int err, i;

        for (i = 0; i < td->o.iodepth; i++) {
                struct iovec *iov = &ld->iovecs[i];

                io_u = ld->io_u_index[i];
                iov->iov_base = io_u->buf;
                iov->iov_len = td_max_bs(td);
        }

        err = fio_ioring_cmd_queue_init(td);
        if (err) {
                int init_err = errno;

                td_verror(td, init_err, "io_queue_init");
                return 1;
        }

        for (i = 0; i < td->o.iodepth; i++) {
                struct io_uring_sqe *sqe;

                if (o->cmd_type == FIO_URING_CMD_NVME) {
                        sqe = &ld->sqes[i << 1];
                        memset(sqe, 0, 2 * sizeof(*sqe));
                } else {
                        sqe = &ld->sqes[i];
                        memset(sqe, 0, sizeof(*sqe));
                }
        }

        if (o->registerfiles) {
                err = fio_ioring_register_files(td);
                if (err) {
                        td_verror(td, errno, "ioring_register_files");
                        return 1;
                }
        }

        return 0;
}

static void parse_prchk_flags(struct ioring_options *o)
{
        if (!o->pi_chk)
                return;

        if (strstr(o->pi_chk, "GUARD") != NULL)
                o->prchk = NVME_IO_PRINFO_PRCHK_GUARD;
        if (strstr(o->pi_chk, "REFTAG") != NULL)
                o->prchk |= NVME_IO_PRINFO_PRCHK_REF;
        if (strstr(o->pi_chk, "APPTAG") != NULL)
                o->prchk |= NVME_IO_PRINFO_PRCHK_APP;
}
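
/*
 * Worked example (illustrative): a job file with pi_chk=GUARD,APPTAG leaves
 * o->prchk == (NVME_IO_PRINFO_PRCHK_GUARD | NVME_IO_PRINFO_PRCHK_APP), with
 * reference tag checking disabled.
 */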

static int fio_ioring_init(struct thread_data *td)
{
        struct ioring_options *o = td->eo;
        struct ioring_data *ld;
        struct nvme_dsm *dsm;
        void *ptr;
        unsigned int dsm_size;
        unsigned long long md_size;
        int ret, i;

        /* sqthread submission requires registered files */
        if (o->sqpoll_thread)
                o->registerfiles = 1;

        if (o->registerfiles && td->o.nr_files != td->o.open_files) {
                log_err("fio: io_uring registered files require nr_files to "
                        "be identical to open_files\n");
                return 1;
        }

        ld = calloc(1, sizeof(*ld));

        /* ring depth must be a power-of-2 */
        ld->iodepth = td->o.iodepth;
        td->o.iodepth = roundup_pow2(td->o.iodepth);
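
        /*
         * e.g. iodepth=24 keeps ld->iodepth at 24 for accounting, while the
         * rings themselves are sized to roundup_pow2(24) == 32 entries.
         */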

        ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));

        /*
         * Metadata buffer for nvme commands; only iomem=malloc / mem=malloc
         * is supported as of now.
         */
        if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
            (o->cmd_type == FIO_URING_CMD_NVME) && o->md_per_io_size) {
                md_size = (unsigned long long) o->md_per_io_size
                                * (unsigned long long) td->o.iodepth;
                md_size += page_mask + td->o.mem_align;
                if (td->o.mem_align && td->o.mem_align > page_size)
                        md_size += td->o.mem_align - page_size;
                if (td->o.mem_type == MEM_MALLOC) {
                        ld->md_buf = malloc(md_size);
                        if (!ld->md_buf) {
                                free(ld);
                                return 1;
                        }
                } else {
                        log_err("fio: Only iomem=malloc or mem=malloc is supported\n");
                        free(ld);
                        return 1;
                }
        }
        parse_prchk_flags(o);

        ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

        td->io_ops_data = ld;

        ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
        if (ret) {
                td_verror(td, EINVAL, "fio_ioring_init");
                return 1;
        }

        /*
         * For io_uring_cmd, trims are async operations unless we are
         * operating in zbd mode, where trim means zone reset.
         */
        if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
            td->o.zone_mode == ZONE_MODE_ZBD) {
                td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;
        } else {
                dsm_size = sizeof(*ld->dsm) +
                        td->o.num_range * sizeof(struct nvme_dsm_range);
                ld->dsm = calloc(td->o.iodepth, dsm_size);
                ptr = ld->dsm;
                for (i = 0; i < td->o.iodepth; i++) {
                        dsm = (struct nvme_dsm *)ptr;
                        dsm->nr_ranges = td->o.num_range;
                        ptr += dsm_size;
                }
        }

        if (!strcmp(td->io_ops->name, "io_uring_cmd")) {
                if (o->readfua)
                        ld->cdw12_flags[DDIR_READ] = 1 << 30;
                if (o->writefua)
                        ld->cdw12_flags[DDIR_WRITE] = 1 << 30;
        }

        return 0;
}
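
/*
 * A note on the 1 << 30 used for cdw12_flags in fio_ioring_init(): bit 30 of
 * CDW12 in the NVMe read/write commands is the FUA (Force Unit Access) bit.
 * The value is OR'd into the command by fio_nvme_uring_cmd_prep(), which
 * receives it as the last argument in fio_ioring_cmd_prep().
 */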

static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct nvme_pi_data *pi_data;
        char *p;

        ld->io_u_index[io_u->index] = io_u;

        if (!strcmp(td->io_ops->name, "io_uring_cmd")) {
                p = PTR_ALIGN(ld->md_buf, page_mask) + td->o.mem_align;
                p += o->md_per_io_size * io_u->index;
                io_u->mmap_data = p;

                if (!o->pi_act) {
                        pi_data = calloc(1, sizeof(*pi_data));
                        pi_data->io_flags |= o->prchk;
                        pi_data->apptag_mask = o->apptag_mask;
                        pi_data->apptag = o->apptag;
                        io_u->engine_data = pi_data;
                }
        }

        return 0;
}

static void fio_ioring_io_u_free(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_options *o = td->eo;
        struct nvme_pi_data *pi;

        if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
            (o->cmd_type == FIO_URING_CMD_NVME)) {
                pi = io_u->engine_data;
                free(pi);
                io_u->engine_data = NULL;
        }
}

static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (!ld || !o->registerfiles)
                return generic_open_file(td, f);

        f->fd = ld->fds[f->engine_pos];
        return 0;
}

static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = NULL;
                unsigned int lba_size = 0;
                __u64 nlba = 0;
                int ret;

                /* Store the namespace-id and lba size. */
                data = FILE_ENG_DATA(f);
                if (data == NULL) {
                        data = calloc(1, sizeof(struct nvme_data));
                        ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);
                        if (ret) {
                                free(data);
                                return ret;
                        }

                        FILE_SET_ENG_DATA(f, data);
                }

                lba_size = data->lba_ext ? data->lba_ext : data->lba_size;

                for_each_rw_ddir(ddir) {
                        if (td->o.min_bs[ddir] % lba_size || td->o.max_bs[ddir] % lba_size) {
                                if (data->lba_ext) {
                                        log_err("%s: block size must be a multiple of %u "
                                                "(LBA data size + Metadata size)\n", f->file_name, lba_size);
                                        if (td->o.min_bs[ddir] == td->o.max_bs[ddir] &&
                                            !(td->o.min_bs[ddir] % data->lba_size)) {
                                                /* fixed block size is actually a multiple of LBA data size */
                                                unsigned long long suggestion = lba_size *
                                                        (td->o.min_bs[ddir] / data->lba_size);
                                                log_err("Did you mean to use a block size of %llu?\n",
                                                        suggestion);
                                        }
                                } else {
                                        log_err("%s: block size must be a multiple of LBA data size\n",
                                                f->file_name);
                                }
                                td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
                                return 1;
                        }
                        if (data->ms && !data->lba_ext && ddir != DDIR_TRIM &&
                            (o->md_per_io_size < ((td->o.max_bs[ddir] / data->lba_size) *
                                                  data->ms))) {
                                log_err("%s: md_per_io_size should be at least %llu bytes\n",
                                        f->file_name,
                                        ((td->o.max_bs[ddir] / data->lba_size) * data->ms));
                                td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
                                return 1;
                        }
                }

                /*
                 * For extended logical block sizes we cannot use verify when
                 * end-to-end data protection checks are enabled, as the PI
                 * section of the data buffer conflicts with verify.
                 */
                if (data->ms && data->pi_type && data->lba_ext &&
                    td->o.verify != VERIFY_NONE) {
                        log_err("%s: for extended LBA, verify cannot be used when E2E data protection is enabled\n",
                                f->file_name);
                        td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
                        return 1;
                }
        }
        if (!ld || !o->registerfiles)
                return generic_open_file(td, f);

        f->fd = ld->fds[f->engine_pos];
        return 0;
}

static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (!ld || !o->registerfiles)
                return generic_close_file(td, f);

        f->fd = -1;
        return 0;
}

static int fio_ioring_cmd_close_file(struct thread_data *td,
                                     struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = FILE_ENG_DATA(f);

                FILE_SET_ENG_DATA(f, NULL);
                free(data);
        }
        if (!ld || !o->registerfiles)
                return generic_close_file(td, f);

        f->fd = -1;
        return 0;
}

static int fio_ioring_cmd_get_file_size(struct thread_data *td,
                                        struct fio_file *f)
{
        struct ioring_options *o = td->eo;

        if (fio_file_size_known(f))
                return 0;

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = NULL;
                __u64 nlba = 0;
                int ret;

                data = calloc(1, sizeof(struct nvme_data));
                ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);
                if (ret) {
                        free(data);
                        return ret;
                }

                f->real_file_size = data->lba_size * nlba;
                fio_file_set_size_known(f);

                FILE_SET_ENG_DATA(f, data);
                return 0;
        }

        return generic_get_file_size(td, f);
}

static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
                                          struct fio_file *f,
                                          enum zbd_zoned_model *model)
{
        return fio_nvme_get_zoned_model(td, f, model);
}

static int fio_ioring_cmd_report_zones(struct thread_data *td,
                                       struct fio_file *f, uint64_t offset,
                                       struct zbd_zone *zbdz,
                                       unsigned int nr_zones)
{
        return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
}

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
                                   uint64_t offset, uint64_t length)
{
        return fio_nvme_reset_wp(td, f, offset, length);
}

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
                                             struct fio_file *f,
                                             unsigned int *max_open_zones)
{
        return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}

static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
                                     struct fio_ruhs_info *fruhs_info)
{
        struct nvme_fdp_ruh_status *ruhs;
        int bytes, ret, i;

        bytes = sizeof(*ruhs) + FDP_MAX_RUHS * sizeof(struct nvme_fdp_ruh_status_desc);
        ruhs = scalloc(1, bytes);
        if (!ruhs)
                return -ENOMEM;

        ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);
        if (ret)
                goto free;

        fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
        for (i = 0; i < fruhs_info->nr_ruhs; i++)
                fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
free:
        sfree(ruhs);
        return ret;
}

static struct ioengine_ops ioengine_uring = {
        .name = "io_uring",
        .version = FIO_IOOPS_VERSION,
        .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
                 FIO_ASYNCIO_SETS_ISSUE_TIME,
        .init = fio_ioring_init,
        .post_init = fio_ioring_post_init,
        .io_u_init = fio_ioring_io_u_init,
        .prep = fio_ioring_prep,
        .queue = fio_ioring_queue,
        .commit = fio_ioring_commit,
        .getevents = fio_ioring_getevents,
        .event = fio_ioring_event,
        .cleanup = fio_ioring_cleanup,
        .open_file = fio_ioring_open_file,
        .close_file = fio_ioring_close_file,
        .get_file_size = generic_get_file_size,
        .options = options,
        .option_struct_size = sizeof(struct ioring_options),
};

static struct ioengine_ops ioengine_uring_cmd = {
        .name = "io_uring_cmd",
        .version = FIO_IOOPS_VERSION,
        .flags = FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
                 FIO_ASYNCIO_SETS_ISSUE_TIME |
                 FIO_MULTI_RANGE_TRIM,
        .init = fio_ioring_init,
        .post_init = fio_ioring_cmd_post_init,
        .io_u_init = fio_ioring_io_u_init,
        .io_u_free = fio_ioring_io_u_free,
        .prep = fio_ioring_cmd_prep,
        .queue = fio_ioring_queue,
        .commit = fio_ioring_commit,
        .getevents = fio_ioring_getevents,
        .event = fio_ioring_cmd_event,
        .cleanup = fio_ioring_cleanup,
        .open_file = fio_ioring_cmd_open_file,
        .close_file = fio_ioring_cmd_close_file,
        .get_file_size = fio_ioring_cmd_get_file_size,
        .get_zoned_model = fio_ioring_cmd_get_zoned_model,
        .report_zones = fio_ioring_cmd_report_zones,
        .reset_wp = fio_ioring_cmd_reset_wp,
        .get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
        .options = options,
        .option_struct_size = sizeof(struct ioring_options),
        .fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,
};

static void fio_init fio_ioring_register(void)
{
        register_ioengine(&ioengine_uring);
        register_ioengine(&ioengine_uring_cmd);
}

static void fio_exit fio_ioring_unregister(void)
{
        unregister_ioengine(&ioengine_uring);
        unregister_ioengine(&ioengine_uring_cmd);
}
#endif