libaio,io_uring: improve cmdprio_percentage option
[fio.git] / engines / io_uring.c
/*
 * io_uring engine
 *
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"

struct io_sq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        unsigned *flags;
        unsigned *array;
};

struct io_cq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        struct io_uring_cqe *cqes;
};

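/*
 * The SQ and CQ ring structs above are userspace views of the rings the
 * kernel exports for this io_uring instance: each member points into the
 * regions mmap()'ed from the ring fd in fio_ioring_mmap() below.
 */
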
struct ioring_mmap {
        void *ptr;
        size_t len;
};

struct ioring_data {
        int ring_fd;

        struct io_u **io_u_index;

        int *fds;

        struct io_sq_ring sq_ring;
        struct io_uring_sqe *sqes;
        struct iovec *iovecs;
        unsigned sq_ring_mask;

        struct io_cq_ring cq_ring;
        unsigned cq_ring_mask;

        int queued;
        int cq_ring_off;
        unsigned iodepth;
        bool ioprio_class_set;
        bool ioprio_set;
        int prepped;

        struct ioring_mmap mmap[3];

        bool use_cmdprio;
};

struct ioring_options {
        void *pad;
        unsigned int hipri;
        struct cmdprio cmdprio;
        unsigned int fixedbufs;
        unsigned int registerfiles;
        unsigned int sqpoll_thread;
        unsigned int sqpoll_set;
        unsigned int sqpoll_cpu;
        unsigned int nonvectored;
        unsigned int uncached;
        unsigned int nowait;
        unsigned int force_async;
};

static const int ddir_to_op[2][2] = {
        { IORING_OP_READV, IORING_OP_READ },
        { IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
        IORING_OP_READ_FIXED,
        IORING_OP_WRITE_FIXED
};

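/*
 * The tables above select the SQE opcode: the first index is the data
 * direction (0 = read, 1 = write), the second picks the non-vectored
 * variant when the "nonvectored" option is in effect. Fixed-buffer I/O
 * always uses the non-vectored *_FIXED opcodes.
 */
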
static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
        struct ioring_options *o = data;

        o->sqpoll_cpu = *val;
        o->sqpoll_set = 1;
        return 0;
}

static struct fio_option options[] = {
        {
                .name = "hipri",
                .lname = "High Priority",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, hipri),
                .help = "Use polled IO completions",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
#ifdef FIO_HAVE_IOPRIO_CLASS
        {
                .name = "cmdprio_percentage",
                .lname = "high priority percentage",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options,
                                 cmdprio.percentage[DDIR_READ]),
                .off2 = offsetof(struct ioring_options,
                                 cmdprio.percentage[DDIR_WRITE]),
                .minval = 0,
                .maxval = 100,
                .help = "Send high priority I/O this percentage of the time",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
#else
        {
                .name = "cmdprio_percentage",
                .lname = "high priority percentage",
                .type = FIO_OPT_UNSUPPORTED,
                .help = "Your platform does not support I/O priority classes",
        },
#endif
        {
                .name = "fixedbufs",
                .lname = "Fixed (pre-mapped) IO buffers",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, fixedbufs),
                .help = "Pre map IO buffers",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "registerfiles",
                .lname = "Register file set",
                .type = FIO_OPT_STR_SET,
                .off1 = offsetof(struct ioring_options, registerfiles),
                .help = "Pre-open/register files",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "sqthread_poll",
                .lname = "Kernel SQ thread polling",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, sqpoll_thread),
                .help = "Offload submission/completion to kernel thread",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "sqthread_poll_cpu",
                .lname = "SQ Thread Poll CPU",
                .type = FIO_OPT_INT,
                .cb = fio_ioring_sqpoll_cb,
                .help = "What CPU to run SQ thread polling on",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "nonvectored",
                .lname = "Non-vectored",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, nonvectored),
                .def = "-1",
                .help = "Use non-vectored read/write commands",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "uncached",
                .lname = "Uncached",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, uncached),
                .help = "Use RWF_UNCACHED for buffered read/writes",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "nowait",
                .lname = "RWF_NOWAIT",
                .type = FIO_OPT_BOOL,
                .off1 = offsetof(struct ioring_options, nowait),
                .help = "Use RWF_NOWAIT for reads/writes",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = "force_async",
                .lname = "Force async",
                .type = FIO_OPT_INT,
                .off1 = offsetof(struct ioring_options, force_async),
                .help = "Set IOSQE_ASYNC every N requests",
                .category = FIO_OPT_C_ENGINE,
                .group = FIO_OPT_G_IOURING,
        },
        {
                .name = NULL,
        },
};

static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
                          unsigned int min_complete, unsigned int flags)
{
        return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
                        min_complete, flags, NULL, 0);
}

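/*
 * The engine drives the ring with raw io_uring_setup()/enter()/register()
 * syscalls (see io_uring_enter() above and fio_ioring_queue_init() below)
 * rather than going through liburing, relying only on the io_uring.h
 * definitions shipped with fio.
 */
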
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct fio_file *f = io_u->file;
        struct io_uring_sqe *sqe;

        sqe = &ld->sqes[io_u->index];

        if (o->registerfiles) {
                sqe->fd = f->engine_pos;
                sqe->flags = IOSQE_FIXED_FILE;
        } else {
                sqe->fd = f->fd;
                sqe->flags = 0;
        }

        if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
                if (o->fixedbufs) {
                        sqe->opcode = fixed_ddir_to_op[io_u->ddir];
                        sqe->addr = (unsigned long) io_u->xfer_buf;
                        sqe->len = io_u->xfer_buflen;
                        sqe->buf_index = io_u->index;
                } else {
                        struct iovec *iov = &ld->iovecs[io_u->index];

                        /*
                         * Update based on actual io_u, requeue could have
                         * adjusted these
                         */
                        iov->iov_base = io_u->xfer_buf;
                        iov->iov_len = io_u->xfer_buflen;

                        sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
                        if (o->nonvectored) {
                                sqe->addr = (unsigned long) iov->iov_base;
                                sqe->len = iov->iov_len;
                        } else {
                                sqe->addr = (unsigned long) iov;
                                sqe->len = 1;
                        }
                }
                sqe->rw_flags = 0;
                if (!td->o.odirect && o->uncached)
                        sqe->rw_flags |= RWF_UNCACHED;
                if (o->nowait)
                        sqe->rw_flags |= RWF_NOWAIT;
                if (ld->ioprio_class_set)
                        sqe->ioprio = td->o.ioprio_class << 13;
                if (ld->ioprio_set)
                        sqe->ioprio |= td->o.ioprio;
                sqe->off = io_u->offset;
        } else if (ddir_sync(io_u->ddir)) {
                sqe->ioprio = 0;
                if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
                        sqe->off = f->first_write;
                        sqe->len = f->last_write - f->first_write;
                        sqe->sync_range_flags = td->o.sync_file_range;
                        sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
                } else {
                        sqe->off = 0;
                        sqe->addr = 0;
                        sqe->len = 0;
                        if (io_u->ddir == DDIR_DATASYNC)
                                sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
                        sqe->opcode = IORING_OP_FSYNC;
                }
        }

        if (o->force_async && ++ld->prepped == o->force_async) {
                ld->prepped = 0;
                sqe->flags |= IOSQE_ASYNC;
        }

        sqe->user_data = (unsigned long) io_u;
        return 0;
}

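/*
 * Note that the priority set on the SQE by fio_ioring_prep() from the
 * ioprio/ioprio_class job options may be overwritten at queue time by
 * fio_ioring_prio_prep() when cmdprio_percentage is in use.
 */
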
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
        struct ioring_data *ld = td->io_ops_data;
        struct io_uring_cqe *cqe;
        struct io_u *io_u;
        unsigned index;

        index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

        cqe = &ld->cq_ring.cqes[index];
        io_u = (struct io_u *) (uintptr_t) cqe->user_data;

        if (cqe->res != io_u->xfer_buflen) {
                if (cqe->res > io_u->xfer_buflen)
                        io_u->error = -cqe->res;
                else
                        io_u->resid = io_u->xfer_buflen - cqe->res;
        } else
                io_u->error = 0;

        return io_u;
}

static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
                                  unsigned int max)
{
        struct ioring_data *ld = td->io_ops_data;
        struct io_cq_ring *ring = &ld->cq_ring;
        unsigned head, reaped = 0;

        head = *ring->head;
        do {
                if (head == atomic_load_acquire(ring->tail))
                        break;
                reaped++;
                head++;
        } while (reaped + events < max);

        if (reaped)
                atomic_store_release(ring->head, head);

        return reaped;
}

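/*
 * CQ ring ordering in fio_ioring_cqring_reap(): the tail is read with
 * acquire semantics so the CQE contents written by the kernel are visible
 * before they are consumed, and the head is published with release
 * semantics so the kernel only reuses entries we are done with.
 */
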
static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
                                unsigned int max, const struct timespec *t)
{
        struct ioring_data *ld = td->io_ops_data;
        unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
        struct ioring_options *o = td->eo;
        struct io_cq_ring *ring = &ld->cq_ring;
        unsigned events = 0;
        int r;

        ld->cq_ring_off = *ring->head;
        do {
                r = fio_ioring_cqring_reap(td, events, max);
                if (r) {
                        events += r;
                        if (actual_min != 0)
                                actual_min -= r;
                        continue;
                }

                if (!o->sqpoll_thread) {
                        r = io_uring_enter(ld, 0, actual_min,
                                           IORING_ENTER_GETEVENTS);
                        if (r < 0) {
                                if (errno == EAGAIN || errno == EINTR)
                                        continue;
                                td_verror(td, errno, "io_uring_enter");
                                break;
                        }
                }
        } while (events < min);

        return r < 0 ? r : events;
}

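/*
 * With sqthread_poll enabled, the kernel SQ thread posts completions on its
 * own, so fio_ioring_getevents() simply keeps reaping the CQ ring;
 * io_uring_enter() is only called to wait for events when submission is
 * done from this context.
 */
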
static void fio_ioring_prio_prep(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_options *o = td->eo;
        struct ioring_data *ld = td->io_ops_data;
        struct io_uring_sqe *sqe = &ld->sqes[io_u->index];
        struct cmdprio *cmdprio = &o->cmdprio;
        unsigned int p = cmdprio->percentage[io_u->ddir];

        if (p && rand_between(&td->prio_state, 0, 99) < p) {
                sqe->ioprio = ioprio_value(IOPRIO_CLASS_RT, 0);
                io_u->flags |= IO_U_F_PRIORITY;
        } else {
                sqe->ioprio = 0;
        }
}

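/*
 * For example (an illustrative job snippet, not part of this commit), a job
 * using
 *
 *      ioengine=io_uring
 *      rw=randrw
 *      cmdprio_percentage=10
 *
 * marks roughly 10% of both reads and writes with IOPRIO_CLASS_RT via
 * fio_ioring_prio_prep() above, leaving the remaining SQEs at the default
 * priority.
 */
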
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
                                          struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;
        struct io_sq_ring *ring = &ld->sq_ring;
        unsigned tail, next_tail;

        fio_ro_check(td, io_u);

        if (ld->queued == ld->iodepth)
                return FIO_Q_BUSY;

        if (io_u->ddir == DDIR_TRIM) {
                if (ld->queued)
                        return FIO_Q_BUSY;

                do_io_u_trim(td, io_u);
                io_u_mark_submit(td, 1);
                io_u_mark_complete(td, 1);
                return FIO_Q_COMPLETED;
        }

        tail = *ring->tail;
        next_tail = tail + 1;
        if (next_tail == atomic_load_acquire(ring->head))
                return FIO_Q_BUSY;

        if (ld->use_cmdprio)
                fio_ioring_prio_prep(td, io_u);
        ring->array[tail & ld->sq_ring_mask] = io_u->index;
        atomic_store_release(ring->tail, next_tail);

        ld->queued++;
        return FIO_Q_QUEUED;
}

static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
        struct ioring_data *ld = td->io_ops_data;
        struct timespec now;

        if (!fio_fill_issue_time(td))
                return;

        fio_gettime(&now, NULL);

        while (nr--) {
                struct io_sq_ring *ring = &ld->sq_ring;
                int index = ring->array[start & ld->sq_ring_mask];
                struct io_u *io_u = ld->io_u_index[index];

                memcpy(&io_u->issue_time, &now, sizeof(now));
                io_u_queued(td, io_u);

                start++;
        }
}

static int fio_ioring_commit(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int ret;

        if (!ld->queued)
                return 0;

        /*
         * Kernel side does submission. We just need to check if the ring is
         * flagged as needing a kick; if so, call io_uring_enter(). This
         * only happens if we've been idle too long.
         */
        if (o->sqpoll_thread) {
                struct io_sq_ring *ring = &ld->sq_ring;
                unsigned flags;

                flags = atomic_load_acquire(ring->flags);
                if (flags & IORING_SQ_NEED_WAKEUP)
                        io_uring_enter(ld, ld->queued, 0,
                                       IORING_ENTER_SQ_WAKEUP);
                ld->queued = 0;
                return 0;
        }

        do {
                unsigned start = *ld->sq_ring.head;
                long nr = ld->queued;

                ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
                if (ret > 0) {
                        fio_ioring_queued(td, start, ret);
                        io_u_mark_submit(td, ret);

                        ld->queued -= ret;
                        ret = 0;
                } else if (!ret) {
                        io_u_mark_submit(td, ret);
                        continue;
                } else {
                        if (errno == EAGAIN || errno == EINTR) {
                                ret = fio_ioring_cqring_reap(td, 0, ld->queued);
                                if (ret)
                                        continue;
                                /* Shouldn't happen */
                                usleep(1);
                                continue;
                        }
                        td_verror(td, errno, "io_uring_enter submit");
                        break;
                }
        } while (ld->queued);

        return ret;
}

static void fio_ioring_unmap(struct ioring_data *ld)
{
        int i;

        for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
                munmap(ld->mmap[i].ptr, ld->mmap[i].len);
        close(ld->ring_fd);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;

        if (ld) {
                if (!(td->flags & TD_F_CHILD))
                        fio_ioring_unmap(ld);

                free(ld->io_u_index);
                free(ld->iovecs);
                free(ld->fds);
                free(ld);
        }
}

static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
        struct io_sq_ring *sring = &ld->sq_ring;
        struct io_cq_ring *cring = &ld->cq_ring;
        void *ptr;

        ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
        ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
                   IORING_OFF_SQ_RING);
        ld->mmap[0].ptr = ptr;
        sring->head = ptr + p->sq_off.head;
        sring->tail = ptr + p->sq_off.tail;
        sring->ring_mask = ptr + p->sq_off.ring_mask;
        sring->ring_entries = ptr + p->sq_off.ring_entries;
        sring->flags = ptr + p->sq_off.flags;
        sring->array = ptr + p->sq_off.array;
        ld->sq_ring_mask = *sring->ring_mask;

        ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
        ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_POPULATE, ld->ring_fd,
                        IORING_OFF_SQES);
        ld->mmap[1].ptr = ld->sqes;

        ld->mmap[2].len = p->cq_off.cqes +
                          p->cq_entries * sizeof(struct io_uring_cqe);
        ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
                   IORING_OFF_CQ_RING);
        ld->mmap[2].ptr = ptr;
        cring->head = ptr + p->cq_off.head;
        cring->tail = ptr + p->cq_off.tail;
        cring->ring_mask = ptr + p->cq_off.ring_mask;
        cring->ring_entries = ptr + p->cq_off.ring_entries;
        cring->cqes = ptr + p->cq_off.cqes;
        ld->cq_ring_mask = *cring->ring_mask;
        return 0;
}

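/*
 * fio_ioring_mmap() maps three regions from the ring fd: the SQ ring
 * metadata plus index array (IORING_OFF_SQ_RING), the SQE array itself
 * (IORING_OFF_SQES), and the CQ ring with its CQEs (IORING_OFF_CQ_RING).
 * The offsets come from the io_uring_params filled in by io_uring_setup().
 */
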
static void fio_ioring_probe(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_uring_probe *p;
        int ret;

        /* already set by user, don't touch */
        if (o->nonvectored != -1)
                return;

        /* default to off, as that's always safe */
        o->nonvectored = 0;

        p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
        if (!p)
                return;

        memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                        IORING_REGISTER_PROBE, p, 256);
        if (ret < 0)
                goto out;

        if (IORING_OP_WRITE > p->ops_len)
                goto out;

        if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
            (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
                o->nonvectored = 1;
out:
        free(p);
}

static int fio_ioring_queue_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int depth = td->o.iodepth;
        struct io_uring_params p;
        int ret;

        memset(&p, 0, sizeof(p));

        if (o->hipri)
                p.flags |= IORING_SETUP_IOPOLL;
        if (o->sqpoll_thread) {
                p.flags |= IORING_SETUP_SQPOLL;
                if (o->sqpoll_set) {
                        p.flags |= IORING_SETUP_SQ_AFF;
                        p.sq_thread_cpu = o->sqpoll_cpu;
                }
        }

        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (ret < 0)
                return ret;

        ld->ring_fd = ret;

        fio_ioring_probe(td);

        if (o->fixedbufs) {
                ret = syscall(__NR_io_uring_register, ld->ring_fd,
                                IORING_REGISTER_BUFFERS, ld->iovecs, depth);
                if (ret < 0)
                        return ret;
        }

        return fio_ioring_mmap(ld, &p);
}

static int fio_ioring_register_files(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct fio_file *f;
        unsigned int i;
        int ret;

        ld->fds = calloc(td->o.nr_files, sizeof(int));

        for_each_file(td, f, i) {
                ret = generic_open_file(td, f);
                if (ret)
                        goto err;
                ld->fds[i] = f->fd;
                f->engine_pos = i;
        }

        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                        IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
        if (ret) {
err:
                free(ld->fds);
                ld->fds = NULL;
        }

        /*
         * Pretend the file is closed again, and really close it if we hit
         * an error.
         */
        for_each_file(td, f, i) {
                if (ret) {
                        int fio_unused ret2;
                        ret2 = generic_close_file(td, f);
                } else
                        f->fd = -1;
        }

        return ret;
}

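/*
 * Registered files let the kernel resolve the fd once instead of on every
 * SQE; fio_ioring_prep() then references the file via its engine_pos index
 * together with IOSQE_FIXED_FILE.
 */
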
static int fio_ioring_post_init(struct thread_data *td)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_u *io_u;
        int err, i;

        for (i = 0; i < td->o.iodepth; i++) {
                struct iovec *iov = &ld->iovecs[i];

                io_u = ld->io_u_index[i];
                iov->iov_base = io_u->buf;
                iov->iov_len = td_max_bs(td);
        }

        err = fio_ioring_queue_init(td);
        if (err) {
                int init_err = errno;

                if (init_err == ENOSYS)
                        log_err("fio: your kernel doesn't support io_uring\n");
                td_verror(td, init_err, "io_queue_init");
                return 1;
        }

        for (i = 0; i < td->o.iodepth; i++) {
                struct io_uring_sqe *sqe;

                sqe = &ld->sqes[i];
                memset(sqe, 0, sizeof(*sqe));
        }

        if (o->registerfiles) {
                err = fio_ioring_register_files(td);
                if (err) {
                        td_verror(td, errno, "ioring_register_files");
                        return 1;
                }
        }

        return 0;
}

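/*
 * In fio_ioring_post_init(), the iovec table is filled from the io_u
 * buffers before the ring is set up, so fio_ioring_queue_init() can hand
 * the same table straight to IORING_REGISTER_BUFFERS when fixedbufs is
 * enabled.
 */
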
static int fio_ioring_init(struct thread_data *td)
{
        struct ioring_options *o = td->eo;
        struct ioring_data *ld;
        struct cmdprio *cmdprio = &o->cmdprio;
        int ret;

        /* sqthread submission requires registered files */
        if (o->sqpoll_thread)
                o->registerfiles = 1;

        if (o->registerfiles && td->o.nr_files != td->o.open_files) {
                log_err("fio: io_uring registered files require nr_files to "
                        "be identical to open_files\n");
                return 1;
        }

        ld = calloc(1, sizeof(*ld));

        /* ring depth must be a power-of-2 */
        ld->iodepth = td->o.iodepth;
        td->o.iodepth = roundup_pow2(td->o.iodepth);

        /* io_u index */
        ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
        ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

        td->io_ops_data = ld;

        ret = fio_cmdprio_init(td, cmdprio, &ld->use_cmdprio);
        if (ret) {
                td_verror(td, EINVAL, "fio_ioring_init");
                return 1;
        }

        if (fio_option_is_set(&td->o, ioprio_class))
                ld->ioprio_class_set = true;
        if (fio_option_is_set(&td->o, ioprio))
                ld->ioprio_set = true;

        return 0;
}

static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
        struct ioring_data *ld = td->io_ops_data;

        ld->io_u_index[io_u->index] = io_u;
        return 0;
}

static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (!ld || !o->registerfiles)
                return generic_open_file(td, f);

        f->fd = ld->fds[f->engine_pos];
        return 0;
}

static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (!ld || !o->registerfiles)
                return generic_close_file(td, f);

        f->fd = -1;
        return 0;
}

static struct ioengine_ops ioengine = {
        .name = "io_uring",
        .version = FIO_IOOPS_VERSION,
        .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD,
        .init = fio_ioring_init,
        .post_init = fio_ioring_post_init,
        .io_u_init = fio_ioring_io_u_init,
        .prep = fio_ioring_prep,
        .queue = fio_ioring_queue,
        .commit = fio_ioring_commit,
        .getevents = fio_ioring_getevents,
        .event = fio_ioring_event,
        .cleanup = fio_ioring_cleanup,
        .open_file = fio_ioring_open_file,
        .close_file = fio_ioring_close_file,
        .get_file_size = generic_get_file_size,
        .options = options,
        .option_struct_size = sizeof(struct ioring_options),
};

static void fio_init fio_ioring_register(void)
{
        register_ioengine(&ioengine);
}

static void fio_exit fio_ioring_unregister(void)
{
        unregister_ioengine(&ioengine);
}
#endif