Add randtrimwrite data direction
[fio.git] / engines / io_uring.c
/*
 * io_uring engine
 *
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
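
/*
 * Example job (a minimal sketch, not part of the original source; the
 * engine options used are the ones registered in this file, and
 * /dev/nvme0n1 is an assumed test device):
 *
 *	[uring]
 *	ioengine=io_uring
 *	filename=/dev/nvme0n1
 *	direct=1
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 *	registerfiles=1
 *	fixedbufs=1
 */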
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"
#include "nvme.h"

#include <sys/stat.h>

enum uring_cmd_type {
        FIO_URING_CMD_NVME = 1,
};

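/*
 * Userspace views of the kernel SQ and CQ rings. These pointers are
 * filled in by fio_ioring_mmap() and point directly into the ring
 * memory mmap'ed from the ring fd.
 */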
struct io_sq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        unsigned *flags;
        unsigned *array;
};

struct io_cq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        struct io_uring_cqe *cqes;
};

struct ioring_mmap {
        void *ptr;
        size_t len;
};

struct ioring_data {
        int ring_fd;

        struct io_u **io_u_index;

        int *fds;

        struct io_sq_ring sq_ring;
        struct io_uring_sqe *sqes;
        struct iovec *iovecs;
        unsigned sq_ring_mask;

        struct io_cq_ring cq_ring;
        unsigned cq_ring_mask;

        int queued;
        int cq_ring_off;
        unsigned iodepth;
        int prepped;

        struct ioring_mmap mmap[3];

        struct cmdprio cmdprio;
};

struct ioring_options {
        struct thread_data *td;
        unsigned int hipri;
        struct cmdprio_options cmdprio_options;
        unsigned int fixedbufs;
        unsigned int registerfiles;
        unsigned int sqpoll_thread;
        unsigned int sqpoll_set;
        unsigned int sqpoll_cpu;
        unsigned int nonvectored;
        unsigned int uncached;
        unsigned int nowait;
        unsigned int force_async;
        enum uring_cmd_type cmd_type;
};

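/*
 * Opcode lookup tables, indexed by fio data direction (DDIR_READ == 0,
 * DDIR_WRITE == 1); the second index of ddir_to_op selects the
 * non-vectored variant of the opcode.
 */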
static const int ddir_to_op[2][2] = {
        { IORING_OP_READV, IORING_OP_READ },
        { IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
        IORING_OP_READ_FIXED,
        IORING_OP_WRITE_FIXED
};

static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
        struct ioring_options *o = data;

        o->sqpoll_cpu = *val;
        o->sqpoll_set = 1;
        return 0;
}

static struct fio_option options[] = {
        {
                .name = "hipri",
                .lname = "High Priority",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, hipri),
                .help = "Use polled IO completions",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
#ifdef FIO_HAVE_IOPRIO_CLASS
        {
                .name = "cmdprio_percentage",
                .lname = "high priority percentage",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options,
                                 cmdprio_options.percentage[DDIR_READ]),
                .off2 = offsetof(struct ioring_options,
                                 cmdprio_options.percentage[DDIR_WRITE]),
                .minval = 0,
                .maxval = 100,
                .help = "Send high priority I/O this percentage of the time",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "cmdprio_class",
                .lname = "Asynchronous I/O priority class",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options,
                                 cmdprio_options.class[DDIR_READ]),
                .off2 = offsetof(struct ioring_options,
                                 cmdprio_options.class[DDIR_WRITE]),
                .help = "Set asynchronous IO priority class",
                .minval = IOPRIO_MIN_PRIO_CLASS + 1,
                .maxval = IOPRIO_MAX_PRIO_CLASS,
                .interval = 1,
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "cmdprio",
                .lname = "Asynchronous I/O priority level",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options,
                                 cmdprio_options.level[DDIR_READ]),
                .off2 = offsetof(struct ioring_options,
                                 cmdprio_options.level[DDIR_WRITE]),
                .help = "Set asynchronous IO priority level",
                .minval = IOPRIO_MIN_PRIO,
                .maxval = IOPRIO_MAX_PRIO,
                .interval = 1,
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "cmdprio_bssplit",
                .lname = "Priority percentage block size split",
                .type = FIO_OPT_STR_STORE,
                .off1 = offsetof(struct ioring_options,
                                 cmdprio_options.bssplit_str),
                .help = "Set priority percentages for different block sizes",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
#else
        {
                .name = "cmdprio_percentage",
                .lname = "high priority percentage",
                .type = FIO_OPT_UNSUPPORTED,
                .help = "Your platform does not support I/O priority classes",
        },
        {
                .name = "cmdprio_class",
                .lname = "Asynchronous I/O priority class",
                .type = FIO_OPT_UNSUPPORTED,
                .help = "Your platform does not support I/O priority classes",
        },
        {
                .name = "cmdprio",
                .lname = "Asynchronous I/O priority level",
                .type = FIO_OPT_UNSUPPORTED,
                .help = "Your platform does not support I/O priority classes",
        },
        {
                .name = "cmdprio_bssplit",
                .lname = "Priority percentage block size split",
                .type = FIO_OPT_UNSUPPORTED,
                .help = "Your platform does not support I/O priority classes",
        },
#endif
        {
                .name = "fixedbufs",
                .lname = "Fixed (pre-mapped) IO buffers",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, fixedbufs),
                .help = "Pre-map IO buffers",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "registerfiles",
                .lname = "Register file set",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, registerfiles),
                .help = "Pre-open/register files",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "sqthread_poll",
                .lname = "Kernel SQ thread polling",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, sqpoll_thread),
                .help = "Offload submission/completion to kernel thread",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "sqthread_poll_cpu",
                .lname = "SQ Thread Poll CPU",
                .type = FIO_OPT_INT,
                .cb = fio_ioring_sqpoll_cb,
                .help = "What CPU to run SQ thread polling on",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "nonvectored",
                .lname = "Non-vectored",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, nonvectored),
                .def = "-1",
                .help = "Use non-vectored read/write commands",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "uncached",
                .lname = "Uncached",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, uncached),
                .help = "Use RWF_UNCACHED for buffered read/writes",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "nowait",
                .lname = "RWF_NOWAIT",
                .type = FIO_OPT_BOOL,
                .off1 = offsetof(struct ioring_options, nowait),
                .help = "Use RWF_NOWAIT for reads/writes",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "force_async",
                .lname = "Force async",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, force_async),
                .help = "Set IOSQE_ASYNC every N requests",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "cmd_type",
                .lname = "Uring cmd type",
                .type = FIO_OPT_STR,
                .off1 = offsetof(struct ioring_options, cmd_type),
                .help = "Specify uring-cmd type",
                .def = "nvme",
                .posval = {
                          { .ival = "nvme",
                            .oval = FIO_URING_CMD_NVME,
                            .help = "Issue nvme-uring-cmd",
                          },
                },
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = NULL,
        },
};

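/*
 * Thin wrapper around the io_uring_enter(2) syscall; this engine drives
 * the rings directly instead of going through liburing.
 */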
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
                          unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
        return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
                             min_complete, flags, NULL, 0);
#else
        return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
                       min_complete, flags, NULL, 0);
#endif
}

static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct fio_file *f = io_u->file;
        struct io_uring_sqe *sqe;

        sqe = &ld->sqes[io_u->index];

        if (o->registerfiles) {
                sqe->fd = f->engine_pos;
                sqe->flags = IOSQE_FIXED_FILE;
        } else {
                sqe->fd = f->fd;
                sqe->flags = 0;
        }

        if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
                if (o->fixedbufs) {
                        sqe->opcode = fixed_ddir_to_op[io_u->ddir];
                        sqe->addr = (unsigned long) io_u->xfer_buf;
                        sqe->len = io_u->xfer_buflen;
                        sqe->buf_index = io_u->index;
                } else {
                        struct iovec *iov = &ld->iovecs[io_u->index];

                        /*
                         * Update based on the actual io_u; a requeue could
                         * have adjusted these.
                         */
                        iov->iov_base = io_u->xfer_buf;
                        iov->iov_len = io_u->xfer_buflen;

                        sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
                        if (o->nonvectored) {
                                sqe->addr = (unsigned long) iov->iov_base;
                                sqe->len = iov->iov_len;
                        } else {
                                sqe->addr = (unsigned long) iov;
                                sqe->len = 1;
                        }
                }
                sqe->rw_flags = 0;
                if (!td->o.odirect && o->uncached)
                        sqe->rw_flags |= RWF_UNCACHED;
                if (o->nowait)
                        sqe->rw_flags |= RWF_NOWAIT;

                /*
                 * Since io_uring can have a submission context (sqthread_poll)
                 * that is different from the process context, we cannot rely on
                 * the IO priority set by ioprio_set() (option prio/prioclass)
                 * to be inherited.
                 * td->ioprio will have the value of the "default prio", so set
                 * this unconditionally. This value might get overridden by
                 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
                 * cmdprio_bssplit is used.
                 */
                sqe->ioprio = td->ioprio;
                sqe->off = io_u->offset;
        } else if (ddir_sync(io_u->ddir)) {
                sqe->ioprio = 0;
                if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
                        sqe->off = f->first_write;
                        sqe->len = f->last_write - f->first_write;
                        sqe->sync_range_flags = td->o.sync_file_range;
                        sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
                } else {
                        sqe->off = 0;
                        sqe->addr = 0;
                        sqe->len = 0;
                        if (io_u->ddir == DDIR_DATASYNC)
                                sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
                        sqe->opcode = IORING_OP_FSYNC;
                }
        }

        if (o->force_async && ++ld->prepped == o->force_async) {
                ld->prepped = 0;
                sqe->flags |= IOSQE_ASYNC;
        }

        sqe->user_data = (unsigned long) io_u;
        return 0;
}

static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct fio_file *f = io_u->file;
        struct nvme_uring_cmd *cmd;
        struct io_uring_sqe *sqe;

        /* only supports nvme_uring_cmd */
        if (o->cmd_type != FIO_URING_CMD_NVME)
                return -EINVAL;

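        /*
         * The ring is created with IORING_SETUP_SQE128 for NVMe commands,
         * so each command occupies two regular SQE slots; scale the index
         * accordingly.
         */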
        sqe = &ld->sqes[(io_u->index) << 1];

        if (o->registerfiles) {
                sqe->fd = f->engine_pos;
                sqe->flags = IOSQE_FIXED_FILE;
        } else {
                sqe->fd = f->fd;
        }
        sqe->rw_flags = 0;
        if (!td->o.odirect && o->uncached)
                sqe->rw_flags |= RWF_UNCACHED;
        if (o->nowait)
                sqe->rw_flags |= RWF_NOWAIT;

        sqe->opcode = IORING_OP_URING_CMD;
        sqe->user_data = (unsigned long) io_u;
        if (o->nonvectored)
                sqe->cmd_op = NVME_URING_CMD_IO;
        else
                sqe->cmd_op = NVME_URING_CMD_IO_VEC;
        if (o->force_async && ++ld->prepped == o->force_async) {
                ld->prepped = 0;
                sqe->flags |= IOSQE_ASYNC;
        }

        cmd = (struct nvme_uring_cmd *)sqe->cmd;
        return fio_nvme_uring_cmd_prep(cmd, io_u,
                        o->nonvectored ? NULL : &ld->iovecs[io_u->index]);
}

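/*
 * Map a completion event index back to its io_u. cq_ring_off is the CQ
 * head snapshotted at the start of fio_ioring_getevents(), so the event
 * numbers fio hands us are relative to that point.
 */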
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
        struct ioring_data *ld = td->io_ops_data;
        struct io_uring_cqe *cqe;
        struct io_u *io_u;
        unsigned index;

        index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

        cqe = &ld->cq_ring.cqes[index];
        io_u = (struct io_u *) (uintptr_t) cqe->user_data;

        if (cqe->res != io_u->xfer_buflen) {
                if (cqe->res > io_u->xfer_buflen)
                        io_u->error = -cqe->res;
                else
                        io_u->resid = io_u->xfer_buflen - cqe->res;
        } else
                io_u->error = 0;

        return io_u;
}

static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_uring_cqe *cqe;
        struct io_u *io_u;
        unsigned index;

        index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
        if (o->cmd_type == FIO_URING_CMD_NVME)
                index <<= 1;

        cqe = &ld->cq_ring.cqes[index];
        io_u = (struct io_u *) (uintptr_t) cqe->user_data;

        if (cqe->res != 0)
                io_u->error = -cqe->res;
        else
                io_u->error = 0;

        return io_u;
}

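/*
 * Reap up to 'max' completions from the CQ ring. The tail is read with
 * acquire semantics and the new head published with a release store,
 * matching the ordering rules for the shared ring.
 */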
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
                                  unsigned int max)
{
        struct ioring_data *ld = td->io_ops_data;
        struct io_cq_ring *ring = &ld->cq_ring;
        unsigned head, reaped = 0;

        head = *ring->head;
        do {
                if (head == atomic_load_acquire(ring->tail))
                        break;
                reaped++;
                head++;
        } while (reaped + events < max);

        if (reaped)
                atomic_store_release(ring->head, head);

        return reaped;
}

static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
                                unsigned int max, const struct timespec *t)
{
        struct ioring_data *ld = td->io_ops_data;
        unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
        struct ioring_options *o = td->eo;
        struct io_cq_ring *ring = &ld->cq_ring;
        unsigned events = 0;
        int r;

        ld->cq_ring_off = *ring->head;
        do {
                r = fio_ioring_cqring_reap(td, events, max);
                if (r) {
                        events += r;
                        if (actual_min != 0)
                                actual_min -= r;
                        continue;
                }

                if (!o->sqpoll_thread) {
                        r = io_uring_enter(ld, 0, actual_min,
                                           IORING_ENTER_GETEVENTS);
                        if (r < 0) {
                                if (errno == EAGAIN || errno == EINTR)
                                        continue;
                                r = -errno;
                                td_verror(td, errno, "io_uring_enter");
                                break;
                        }
                }
        } while (events < min);

        return r < 0 ? r : events;
}

static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
                                           struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct cmdprio *cmdprio = &ld->cmdprio;

        if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
                ld->sqes[io_u->index].ioprio = io_u->ioprio;
}

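/*
 * Queue an io_u on the SQ ring. Note that trims are not submitted
 * through the ring at all: they are completed inline via do_io_u_trim(),
 * which is why both engines set FIO_ASYNCIO_SYNC_TRIM.
 */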
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
                                          struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct io_sq_ring *ring = &ld->sq_ring;
        unsigned tail, next_tail;

        fio_ro_check(td, io_u);

        if (ld->queued == ld->iodepth)
                return FIO_Q_BUSY;

        if (io_u->ddir == DDIR_TRIM) {
                if (ld->queued)
                        return FIO_Q_BUSY;

                do_io_u_trim(td, io_u);
                io_u_mark_submit(td, 1);
                io_u_mark_complete(td, 1);
                return FIO_Q_COMPLETED;
        }

        tail = *ring->tail;
        next_tail = tail + 1;
        if (next_tail == atomic_load_acquire(ring->head))
                return FIO_Q_BUSY;

        if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
                fio_ioring_cmdprio_prep(td, io_u);

        ring->array[tail & ld->sq_ring_mask] = io_u->index;
        atomic_store_release(ring->tail, next_tail);

        ld->queued++;
        return FIO_Q_QUEUED;
}

static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
        struct ioring_data *ld = td->io_ops_data;
        struct timespec now;

        if (!fio_fill_issue_time(td))
                return;

        fio_gettime(&now, NULL);

        while (nr--) {
                struct io_sq_ring *ring = &ld->sq_ring;
                int index = ring->array[start & ld->sq_ring_mask];
                struct io_u *io_u = ld->io_u_index[index];

                memcpy(&io_u->issue_time, &now, sizeof(now));
                io_u_queued(td, io_u);

                start++;
        }

        /*
         * only used for iolog
         */
        if (td->o.read_iolog_file)
                memcpy(&td->last_issue, &now, sizeof(now));
}

static int fio_ioring_commit(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int ret;

        if (!ld->queued)
                return 0;

        /*
         * The kernel side handles submission; we just need to check if the
         * ring is flagged as needing a kick and, if so, call
         * io_uring_enter(). This only happens if we've been idle too long.
         */
        if (o->sqpoll_thread) {
                struct io_sq_ring *ring = &ld->sq_ring;
                unsigned flags;

                flags = atomic_load_acquire(ring->flags);
                if (flags & IORING_SQ_NEED_WAKEUP)
                        io_uring_enter(ld, ld->queued, 0,
                                       IORING_ENTER_SQ_WAKEUP);
                ld->queued = 0;
                return 0;
        }

        do {
                unsigned start = *ld->sq_ring.head;
                long nr = ld->queued;

                ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
                if (ret > 0) {
                        fio_ioring_queued(td, start, ret);
                        io_u_mark_submit(td, ret);

                        ld->queued -= ret;
                        ret = 0;
                } else if (!ret) {
                        io_u_mark_submit(td, ret);
                        continue;
                } else {
                        if (errno == EAGAIN || errno == EINTR) {
                                ret = fio_ioring_cqring_reap(td, 0, ld->queued);
                                if (ret)
                                        continue;
                                /* Shouldn't happen */
                                usleep(1);
                                continue;
                        }
                        ret = -errno;
                        td_verror(td, errno, "io_uring_enter submit");
                        break;
                }
        } while (ld->queued);

        return ret;
}

static void fio_ioring_unmap(struct ioring_data *ld)
{
        int i;

        for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
                munmap(ld->mmap[i].ptr, ld->mmap[i].len);
        close(ld->ring_fd);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;

        if (ld) {
                if (!(td->flags & TD_F_CHILD))
                        fio_ioring_unmap(ld);

                fio_cmdprio_cleanup(&ld->cmdprio);
                free(ld->io_u_index);
                free(ld->iovecs);
                free(ld->fds);
                free(ld);
        }
}

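/*
 * Map the three ring regions: the SQ ring (including the SQ index
 * array), the SQE array, and the CQ ring. IORING_SETUP_SQE128 and
 * IORING_SETUP_CQE32 double the size of the respective entries.
 */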
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
        struct io_sq_ring *sring = &ld->sq_ring;
        struct io_cq_ring *cring = &ld->cq_ring;
        void *ptr;

        ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
        ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
                   IORING_OFF_SQ_RING);
        ld->mmap[0].ptr = ptr;
        sring->head = ptr + p->sq_off.head;
        sring->tail = ptr + p->sq_off.tail;
        sring->ring_mask = ptr + p->sq_off.ring_mask;
        sring->ring_entries = ptr + p->sq_off.ring_entries;
        sring->flags = ptr + p->sq_off.flags;
        sring->array = ptr + p->sq_off.array;
        ld->sq_ring_mask = *sring->ring_mask;

        if (p->flags & IORING_SETUP_SQE128)
                ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
        else
                ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
        ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_POPULATE, ld->ring_fd,
                        IORING_OFF_SQES);
        ld->mmap[1].ptr = ld->sqes;

        if (p->flags & IORING_SETUP_CQE32) {
                ld->mmap[2].len = p->cq_off.cqes +
                        2 * p->cq_entries * sizeof(struct io_uring_cqe);
        } else {
                ld->mmap[2].len = p->cq_off.cqes +
                        p->cq_entries * sizeof(struct io_uring_cqe);
        }
        ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
                   IORING_OFF_CQ_RING);
        ld->mmap[2].ptr = ptr;
        cring->head = ptr + p->cq_off.head;
        cring->tail = ptr + p->cq_off.tail;
        cring->ring_mask = ptr + p->cq_off.ring_mask;
        cring->ring_entries = ptr + p->cq_off.ring_entries;
        cring->cqes = ptr + p->cq_off.cqes;
        ld->cq_ring_mask = *cring->ring_mask;
        return 0;
}

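/*
 * If the user didn't explicitly choose vectored vs non-vectored I/O,
 * probe the kernel and prefer the non-vectored READ/WRITE opcodes when
 * both are supported.
 */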
static void fio_ioring_probe(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_uring_probe *p;
        int ret;

        /* already set by user, don't touch */
        if (o->nonvectored != -1)
                return;

        /* default to off, as that's always safe */
        o->nonvectored = 0;

        p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
        if (!p)
                return;

        memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                      IORING_REGISTER_PROBE, p, 256);
        if (ret < 0)
                goto out;

        if (IORING_OP_WRITE > p->ops_len)
                goto out;

        if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
            (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
                o->nonvectored = 1;
out:
        free(p);
}

static int fio_ioring_queue_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int depth = td->o.iodepth;
        struct io_uring_params p;
        int ret;

        memset(&p, 0, sizeof(p));

        if (o->hipri)
                p.flags |= IORING_SETUP_IOPOLL;
        if (o->sqpoll_thread) {
                p.flags |= IORING_SETUP_SQPOLL;
                if (o->sqpoll_set) {
                        p.flags |= IORING_SETUP_SQ_AFF;
                        p.sq_thread_cpu = o->sqpoll_cpu;
                }
        }

        /*
         * Clamp the CQ ring size at our SQ ring size; we don't need more
         * entries than that.
         */
        p.flags |= IORING_SETUP_CQSIZE;
        p.cq_entries = depth;

        /*
         * Set up COOP_TASKRUN, as we don't need to be interrupted by an
         * IPI to complete IO operations.
         */
        p.flags |= IORING_SETUP_COOP_TASKRUN;

        /*
         * io_uring is always a single issuer, and we can defer task_work
         * runs until we reap events.
         */
        p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (ret < 0) {
                if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
                        p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
                        p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
                        goto retry;
                }
                if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
                        p.flags &= ~IORING_SETUP_COOP_TASKRUN;
                        goto retry;
                }
                if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
                        p.flags &= ~IORING_SETUP_CQSIZE;
                        goto retry;
                }
                return ret;
        }

        ld->ring_fd = ret;

        fio_ioring_probe(td);

        if (o->fixedbufs) {
                ret = syscall(__NR_io_uring_register, ld->ring_fd,
                              IORING_REGISTER_BUFFERS, ld->iovecs, depth);
                if (ret < 0)
                        return ret;
        }

        return fio_ioring_mmap(ld, &p);
}

static int fio_ioring_cmd_queue_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int depth = td->o.iodepth;
        struct io_uring_params p;
        int ret;

        memset(&p, 0, sizeof(p));

        if (o->hipri)
                p.flags |= IORING_SETUP_IOPOLL;
        if (o->sqpoll_thread) {
                p.flags |= IORING_SETUP_SQPOLL;
                if (o->sqpoll_set) {
                        p.flags |= IORING_SETUP_SQ_AFF;
                        p.sq_thread_cpu = o->sqpoll_cpu;
                }
        }
        if (o->cmd_type == FIO_URING_CMD_NVME) {
                p.flags |= IORING_SETUP_SQE128;
                p.flags |= IORING_SETUP_CQE32;
        }

        /*
         * Clamp the CQ ring size at our SQ ring size; we don't need more
         * entries than that.
         */
        p.flags |= IORING_SETUP_CQSIZE;
        p.cq_entries = depth;

retry:
        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (ret < 0) {
                if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
                        p.flags &= ~IORING_SETUP_CQSIZE;
                        goto retry;
                }
                return ret;
        }

        ld->ring_fd = ret;

        fio_ioring_probe(td);

        if (o->fixedbufs) {
                ret = syscall(__NR_io_uring_register, ld->ring_fd,
                              IORING_REGISTER_BUFFERS, ld->iovecs, depth);
                if (ret < 0)
                        return ret;
        }

        return fio_ioring_mmap(ld, &p);
}

static int fio_ioring_register_files(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct fio_file *f;
        unsigned int i;
        int ret;

        ld->fds = calloc(td->o.nr_files, sizeof(int));

        for_each_file(td, f, i) {
                ret = generic_open_file(td, f);
                if (ret)
                        goto err;
                ld->fds[i] = f->fd;
                f->engine_pos = i;
        }

        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                      IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
        if (ret) {
err:
                free(ld->fds);
                ld->fds = NULL;
        }

        /*
         * Pretend the file is closed again, and really close it if we hit
         * an error.
         */
        for_each_file(td, f, i) {
                if (ret) {
                        int fio_unused ret2;
                        ret2 = generic_close_file(td, f);
                } else
                        f->fd = -1;
        }

        return ret;
}

static int fio_ioring_post_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_u *io_u;
        int err, i;

        for (i = 0; i < td->o.iodepth; i++) {
                struct iovec *iov = &ld->iovecs[i];

                io_u = ld->io_u_index[i];
                iov->iov_base = io_u->buf;
                iov->iov_len = td_max_bs(td);
        }

        err = fio_ioring_queue_init(td);
        if (err) {
                int init_err = errno;

                if (init_err == ENOSYS)
                        log_err("fio: your kernel doesn't support io_uring\n");
                td_verror(td, init_err, "io_queue_init");
                return 1;
        }

        for (i = 0; i < td->o.iodepth; i++) {
                struct io_uring_sqe *sqe;

                sqe = &ld->sqes[i];
                memset(sqe, 0, sizeof(*sqe));
        }

        if (o->registerfiles) {
                err = fio_ioring_register_files(td);
                if (err) {
                        td_verror(td, errno, "ioring_register_files");
                        return 1;
                }
        }

        return 0;
}

static int fio_ioring_cmd_post_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_u *io_u;
        int err, i;

        for (i = 0; i < td->o.iodepth; i++) {
                struct iovec *iov = &ld->iovecs[i];

                io_u = ld->io_u_index[i];
                iov->iov_base = io_u->buf;
                iov->iov_len = td_max_bs(td);
        }

        err = fio_ioring_cmd_queue_init(td);
        if (err) {
                int init_err = errno;

                td_verror(td, init_err, "io_queue_init");
                return 1;
        }

        for (i = 0; i < td->o.iodepth; i++) {
                struct io_uring_sqe *sqe;

                if (o->cmd_type == FIO_URING_CMD_NVME) {
                        sqe = &ld->sqes[i << 1];
                        memset(sqe, 0, 2 * sizeof(*sqe));
                } else {
                        sqe = &ld->sqes[i];
                        memset(sqe, 0, sizeof(*sqe));
                }
        }

        if (o->registerfiles) {
                err = fio_ioring_register_files(td);
                if (err) {
                        td_verror(td, errno, "ioring_register_files");
                        return 1;
                }
        }

        return 0;
}

static int fio_ioring_init(struct thread_data *td)
{
        struct ioring_options *o = td->eo;
        struct ioring_data *ld;
        int ret;

        /* sqthread submission requires registered files */
        if (o->sqpoll_thread)
                o->registerfiles = 1;

        if (o->registerfiles && td->o.nr_files != td->o.open_files) {
                log_err("fio: io_uring registered files require nr_files to "
                        "be identical to open_files\n");
                return 1;
        }

        ld = calloc(1, sizeof(*ld));

        /* ring depth must be a power-of-2 */
        ld->iodepth = td->o.iodepth;
        td->o.iodepth = roundup_pow2(td->o.iodepth);

        /* io_u index */
        ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
        ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

        td->io_ops_data = ld;

        ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
        if (ret) {
                td_verror(td, EINVAL, "fio_ioring_init");
                return 1;
        }

        return 0;
}

static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;

        ld->io_u_index[io_u->index] = io_u;
        return 0;
}

static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (!ld || !o->registerfiles)
                return generic_open_file(td, f);

        f->fd = ld->fds[f->engine_pos];
        return 0;
}

static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = NULL;
                unsigned int nsid, lba_size = 0;
                unsigned long long nlba = 0;
                int ret;

                /* Store the namespace-id and lba size. */
                data = FILE_ENG_DATA(f);
                if (data == NULL) {
                        ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
                        if (ret)
                                return ret;

                        data = calloc(1, sizeof(struct nvme_data));
                        data->nsid = nsid;
                        data->lba_shift = ilog2(lba_size);

                        FILE_SET_ENG_DATA(f, data);
                }
        }
        if (!ld || !o->registerfiles)
                return generic_open_file(td, f);

        f->fd = ld->fds[f->engine_pos];
        return 0;
}

static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (!ld || !o->registerfiles)
                return generic_close_file(td, f);

        f->fd = -1;
        return 0;
}

static int fio_ioring_cmd_close_file(struct thread_data *td,
                                     struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = FILE_ENG_DATA(f);

                FILE_SET_ENG_DATA(f, NULL);
                free(data);
        }
        if (!ld || !o->registerfiles)
                return generic_close_file(td, f);

        f->fd = -1;
        return 0;
}

static int fio_ioring_cmd_get_file_size(struct thread_data *td,
                                        struct fio_file *f)
{
        struct ioring_options *o = td->eo;

        if (fio_file_size_known(f))
                return 0;

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = NULL;
                unsigned int nsid, lba_size = 0;
                unsigned long long nlba = 0;
                int ret;

                ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
                if (ret)
                        return ret;

                data = calloc(1, sizeof(struct nvme_data));
                data->nsid = nsid;
                data->lba_shift = ilog2(lba_size);

                f->real_file_size = lba_size * nlba;
                fio_file_set_size_known(f);

                FILE_SET_ENG_DATA(f, data);
                return 0;
        }
        return generic_get_file_size(td, f);
}

static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
                                          struct fio_file *f,
                                          enum zbd_zoned_model *model)
{
        return fio_nvme_get_zoned_model(td, f, model);
}

static int fio_ioring_cmd_report_zones(struct thread_data *td,
                                       struct fio_file *f, uint64_t offset,
                                       struct zbd_zone *zbdz,
                                       unsigned int nr_zones)
{
        return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
}

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
                                   uint64_t offset, uint64_t length)
{
        return fio_nvme_reset_wp(td, f, offset, length);
}

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
                                             struct fio_file *f,
                                             unsigned int *max_open_zones)
{
        return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}

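/*
 * Two engines are registered from this file: "io_uring" for regular
 * file and block device I/O, and "io_uring_cmd" for passthrough
 * commands (currently NVMe only), including the zoned-device hooks
 * implemented via the nvme helpers above.
 */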
static struct ioengine_ops ioengine_uring = {
        .name = "io_uring",
        .version = FIO_IOOPS_VERSION,
        .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
                 FIO_ASYNCIO_SETS_ISSUE_TIME,
        .init = fio_ioring_init,
        .post_init = fio_ioring_post_init,
        .io_u_init = fio_ioring_io_u_init,
        .prep = fio_ioring_prep,
        .queue = fio_ioring_queue,
        .commit = fio_ioring_commit,
        .getevents = fio_ioring_getevents,
        .event = fio_ioring_event,
        .cleanup = fio_ioring_cleanup,
        .open_file = fio_ioring_open_file,
        .close_file = fio_ioring_close_file,
        .get_file_size = generic_get_file_size,
        .options = options,
        .option_struct_size = sizeof(struct ioring_options),
};

static struct ioengine_ops ioengine_uring_cmd = {
        .name = "io_uring_cmd",
        .version = FIO_IOOPS_VERSION,
        .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
                 FIO_MEMALIGN | FIO_RAWIO |
                 FIO_ASYNCIO_SETS_ISSUE_TIME,
        .init = fio_ioring_init,
        .post_init = fio_ioring_cmd_post_init,
        .io_u_init = fio_ioring_io_u_init,
        .prep = fio_ioring_cmd_prep,
        .queue = fio_ioring_queue,
        .commit = fio_ioring_commit,
        .getevents = fio_ioring_getevents,
        .event = fio_ioring_cmd_event,
        .cleanup = fio_ioring_cleanup,
        .open_file = fio_ioring_cmd_open_file,
        .close_file = fio_ioring_cmd_close_file,
        .get_file_size = fio_ioring_cmd_get_file_size,
        .get_zoned_model = fio_ioring_cmd_get_zoned_model,
        .report_zones = fio_ioring_cmd_report_zones,
        .reset_wp = fio_ioring_cmd_reset_wp,
        .get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
        .options = options,
        .option_struct_size = sizeof(struct ioring_options),
};

static void fio_init fio_ioring_register(void)
{
        register_ioengine(&ioengine_uring);
        register_ioengine(&ioengine_uring_cmd);
}

static void fio_exit fio_ioring_unregister(void)
{
        unregister_ioengine(&ioengine_uring);
        unregister_ioengine(&ioengine_uring_cmd);
}
#endif