/*
 * io_uring engine
 *
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"
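
/*
 * User-space views of the submission and completion rings. Each field is a
 * pointer into the memory the kernel exports through mmap() on the ring fd;
 * see fio_ioring_mmap() below for how they are wired up.
 */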
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

struct ioring_mmap {
	void *ptr;
	size_t len;
};

struct ioring_data {
	int ring_fd;

	struct io_u **io_u_index;

	int *fds;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec *iovecs;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	int queued;
	int cq_ring_off;
	unsigned iodepth;
	int prepped;

	struct ioring_mmap mmap[3];

	bool use_cmdprio;
};

struct ioring_options {
	struct thread_data *td;
	unsigned int hipri;
	struct cmdprio cmdprio;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int nowait;
	unsigned int force_async;
};
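
/*
 * Opcode lookup tables: the first index is the data direction (read/write),
 * and the second index of ddir_to_op selects the non-vectored opcode when
 * the "nonvectored" option is in effect. fixed_ddir_to_op is used when
 * buffers are pre-registered via the "fixedbufs" option.
 */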
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED
};

static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
	o->sqpoll_set = 1;
	return 0;
}

static int str_cmdprio_bssplit_cb(void *data, const char *input)
{
	struct ioring_options *o = data;
	struct thread_data *td = o->td;
	struct cmdprio *cmdprio = &o->cmdprio;

	return fio_cmdprio_bssplit_parse(td, input, cmdprio);
}

static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "High Priority",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, hipri),
		.help	= "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#ifdef FIO_HAVE_IOPRIO_CLASS
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options,
				   cmdprio.percentage[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio.percentage[DDIR_WRITE]),
		.minval	= 0,
		.maxval	= 100,
		.help	= "Send high priority I/O this percentage of the time",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio_class",
		.lname	= "Asynchronous I/O priority class",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options,
				   cmdprio.class[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio.class[DDIR_WRITE]),
		.help	= "Set asynchronous IO priority class",
		.minval	= IOPRIO_MIN_PRIO_CLASS + 1,
		.maxval	= IOPRIO_MAX_PRIO_CLASS,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio",
		.lname	= "Asynchronous I/O priority level",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options,
				   cmdprio.level[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio.level[DDIR_WRITE]),
		.help	= "Set asynchronous IO priority level",
		.minval	= IOPRIO_MIN_PRIO,
		.maxval	= IOPRIO_MAX_PRIO,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio_bssplit",
		.lname	= "Priority percentage block size split",
		.type	= FIO_OPT_STR_ULL,
		.cb	= str_cmdprio_bssplit_cb,
		.off1	= offsetof(struct ioring_options, cmdprio.bssplit),
		.help	= "Set priority percentages for different block sizes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#else
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio_class",
		.lname	= "Asynchronous I/O priority class",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio",
		.lname	= "Asynchronous I/O priority level",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio_bssplit",
		.lname	= "Priority percentage block size split",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
#endif
	{
		.name	= "fixedbufs",
		.lname	= "Fixed (pre-mapped) IO buffers",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, fixedbufs),
		.help	= "Pre map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "registerfiles",
		.lname	= "Register file set",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, registerfiles),
		.help	= "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll",
		.lname	= "Kernel SQ thread polling",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, sqpoll_thread),
		.help	= "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll_cpu",
		.lname	= "SQ Thread Poll CPU",
		.type	= FIO_OPT_INT,
		.cb	= fio_ioring_sqpoll_cb,
		.help	= "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nonvectored",
		.lname	= "Non-vectored",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, nonvectored),
		.def	= "-1",
		.help	= "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "uncached",
		.lname	= "Uncached",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, uncached),
		.help	= "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nowait",
		.lname	= "RWF_NOWAIT",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct ioring_options, nowait),
		.help	= "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "force_async",
		.lname	= "Force async",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, force_async),
		.help	= "Set IOSQE_ASYNC every N requests",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= NULL,
	},
};
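
/*
 * Example job using the options above (a sketch for illustration, not from
 * the fio documentation; the device path is a placeholder):
 *
 *	[uring-test]
 *	ioengine=io_uring
 *	filename=/dev/nvme0n1
 *	direct=1
 *	rw=randread
 *	iodepth=32
 *	fixedbufs
 *	registerfiles
 *	sqthread_poll
 */
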
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
}
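
/*
 * Fill in the SQE for an io_u. SQE slots are indexed by io_u->index, which
 * is stable for the life of the job, so only the fields that may change
 * from one submission to the next are rewritten here.
 */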
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
		sqe->flags = 0;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on actual io_u, requeue could have
			 * adjusted these
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;
				sqe->len = 1;
			}
		}
		sqe->rw_flags = 0;
		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		sqe->ioprio = 0;
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			sqe->off = 0;
			sqe->addr = 0;
			sqe->len = 0;
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}

	sqe->user_data = (unsigned long) io_u;
	return 0;
}
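
/*
 * Map completion slot 'event' back to its io_u. The io_u pointer travels in
 * cqe->user_data. Note that cqe->res is signed: a negative (error) value
 * compares greater than xfer_buflen after the unsigned promotion, which is
 * what the first branch below relies on.
 */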
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	} else
		io_u->error = 0;

	return io_u;
}
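
/*
 * Reap completed CQEs without entering the kernel: advance a private copy
 * of the CQ head until it meets the tail published by the kernel (acquire
 * load), then publish the new head back with a release store.
 */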
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		if (head == atomic_load_acquire(ring->tail))
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	if (reaped)
		atomic_store_release(ring->head, head);

	return reaped;
}

static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned events = 0;
	int r;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);
		if (r) {
			events += r;
			if (actual_min != 0)
				actual_min -= r;
			continue;
		}

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
			if (r < 0) {
				if (errno == EAGAIN || errno == EINTR)
					continue;
				td_verror(td, errno, "io_uring_enter");
				break;
			}
		}
	} while (events < min);

	return r < 0 ? r : events;
}
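
/*
 * Set the command priority in the SQE. A cmdprio_percentage% slice of
 * commands (chosen at random) gets the cmdprio class/level; the rest
 * inherit td->ioprio. IO_U_F_HIGH_PRIO marks whichever of the two is the
 * higher (numerically lower) priority, for per-priority statistics.
 */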
static void fio_ioring_prio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_sqe *sqe = &ld->sqes[io_u->index];
	struct cmdprio *cmdprio = &o->cmdprio;
	enum fio_ddir ddir = io_u->ddir;
	unsigned int p = fio_cmdprio_percentage(cmdprio, io_u);
	unsigned int cmdprio_value =
		ioprio_value(cmdprio->class[ddir], cmdprio->level[ddir]);

	if (p && rand_between(&td->prio_state, 0, 99) < p) {
		sqe->ioprio = cmdprio_value;
		if (!td->ioprio || cmdprio_value < td->ioprio) {
			/*
			 * The async IO priority is higher (has a lower value)
			 * than the priority set by "prio" and "prioclass"
			 * options.
			 */
			io_u->flags |= IO_U_F_HIGH_PRIO;
		}
	} else {
		sqe->ioprio = td->ioprio;
		if (cmdprio_value && td->ioprio && td->ioprio < cmdprio_value) {
			/*
			 * The IO will be executed with the priority set by
			 * "prio" and "prioclass" options, and this priority
			 * is higher (has a lower value) than the async IO
			 * priority.
			 */
			io_u->flags |= IO_U_F_HIGH_PRIO;
		}
	}

	io_u->ioprio = sqe->ioprio;
}
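
/*
 * Queue one io_u into the SQ ring. Trims are completed inline (the engine
 * sets FIO_ASYNCIO_SYNC_TRIM), and only when nothing else is in flight;
 * everything else takes a ring slot and waits for commit.
 */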
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	if (next_tail == atomic_load_acquire(ring->head))
		return FIO_Q_BUSY;

	if (ld->use_cmdprio)
		fio_ioring_prio_prep(td, io_u);
	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);

	ld->queued++;
	return FIO_Q_QUEUED;
}
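
/*
 * Stamp issue times for the batch of SQ entries [start, start + nr) that
 * the kernel just accepted.
 */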
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}
}

static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * Kernel side does submission; we just need to check if the ring is
	 * flagged as needing a kick, and if so, call io_uring_enter(). This
	 * only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned flags;

		flags = atomic_load_acquire(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}

static void fio_ioring_unmap(struct ioring_data *ld)
{
	int i;

	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
	close(ld->ring_fd);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (ld) {
		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		free(ld->io_u_index);
		free(ld->iovecs);
		free(ld->fds);
		free(ld);
	}
}
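
/*
 * Map the three regions the kernel exports for a ring: the SQ ring
 * metadata plus index array, the SQE array, and the CQ ring including the
 * CQEs themselves. The ptr/len pairs saved in ld->mmap feed
 * fio_ioring_unmap() at teardown.
 */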
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;
	void *ptr;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, ld->ring_fd,
				IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	ld->mmap[2].len = p->cq_off.cqes +
				p->cq_entries * sizeof(struct io_uring_cqe);
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
	return 0;
}

static void fio_ioring_probe(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;
	int ret;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_WRITE > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
		o->nonvectored = 1;
out:
	free(p);
}
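
/*
 * Create the ring: translate engine options into io_uring_setup() flags
 * (IOPOLL for hipri, SQPOLL plus optional CPU affinity for sqthread_poll),
 * probe for non-vectored opcode support, register fixed buffers if
 * requested, then map the rings.
 */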
static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}
	}

	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0)
		return ret;

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}
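
/*
 * Open every file up front and register the fd set with
 * IORING_REGISTER_FILES, so SQEs can refer to files by index via
 * IOSQE_FIXED_FILE. This is required when the kernel SQ poll thread
 * submits on our behalf.
 */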
static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct fio_file *f;
	unsigned int i;
	int ret;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
		if (ret)
			goto err;
		ld->fds[f->fileno] = f->fd;
		f->engine_pos = f->fileno;
	}

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
	if (ret) {
err:
		free(ld->fds);
		ld->fds = NULL;
	}

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		if (ret) {
			int ret2;

			ret2 = generic_close_file(td, f);
			if (ret2)
				log_err("fio: failed to close file %d, error %d\n",
					f->fileno, ret2);
		} else
			f->fd = -1;
	}

	return ret;
}

static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}

static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	struct cmdprio *cmdprio = &o->cmdprio;
	bool has_cmdprio = false;
	int ret;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	/* io_u index */
	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, cmdprio, &has_cmdprio);
	if (ret) {
		td_verror(td, EINVAL, "fio_ioring_init");
		return 1;
	}

	/*
	 * Since io_uring can have a submission context (sqthread_poll) that is
	 * different from the process context, we cannot rely on the IO
	 * priority set by ioprio_set() (options prio/prioclass) to be
	 * inherited. Therefore, we set the sqe->ioprio field when
	 * prio/prioclass is used.
	 */
	ld->use_cmdprio = has_cmdprio ||
		fio_option_is_set(&td->o, ioprio_class) ||
		fio_option_is_set(&td->o, ioprio);

	return 0;
}

static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->io_u_index[io_u->index] = io_u;
	return 0;
}

static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}

static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}
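
/*
 * Engine hooks. FIO_ASYNCIO_SYNC_TRIM matches the inline trim completion in
 * fio_ioring_queue(), and FIO_NO_OFFLOAD keeps submission in the job
 * process, where the rings are mapped.
 */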
static struct ioengine_ops ioengine = {
	.name			= "io_uring",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.prep			= fio_ioring_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_open_file,
	.close_file		= fio_ioring_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
};

static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine);
}
#endif /* ARCH_HAVE_IOURING */