/*
 * io_uring engine
 *
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"

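/*
 * Userspace views of the kernel-shared submission (SQ) and completion (CQ)
 * rings. The pointers below are filled in by fio_ioring_mmap() and point
 * into the regions obtained by mmap()ing the ring fd; head/tail are shared
 * with the kernel and accessed with acquire/release semantics.
 */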
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

struct ioring_mmap {
	void *ptr;
	size_t len;
};

struct ioring_data {
	int ring_fd;

	struct io_u **io_u_index;

	int *fds;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec *iovecs;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	int queued;
	int cq_ring_off;
	unsigned iodepth;
	int prepped;

	struct ioring_mmap mmap[3];

	struct cmdprio cmdprio;
};

struct ioring_options {
	struct thread_data *td;
	unsigned int hipri;
	struct cmdprio_options cmdprio_options;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int nowait;
	unsigned int force_async;
};

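/*
 * Map fio's data direction (read/write) and the "nonvectored" option to the
 * matching io_uring opcode: READV/WRITEV vs. READ/WRITE, plus the
 * READ_FIXED/WRITE_FIXED variants used with the fixedbufs option.
 */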
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED
};

static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
	o->sqpoll_set = 1;
	return 0;
}

static struct fio_option options[] = {
	{
		.name = "hipri",
		.lname = "High Priority",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, hipri),
		.help = "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
#ifdef FIO_HAVE_IOPRIO_CLASS
	{
		.name = "cmdprio_percentage",
		.lname = "high priority percentage",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options,
				 cmdprio_options.percentage[DDIR_READ]),
		.off2 = offsetof(struct ioring_options,
				 cmdprio_options.percentage[DDIR_WRITE]),
		.minval = 0,
		.maxval = 100,
		.help = "Send high priority I/O this percentage of the time",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "cmdprio_class",
		.lname = "Asynchronous I/O priority class",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options,
				 cmdprio_options.class[DDIR_READ]),
		.off2 = offsetof(struct ioring_options,
				 cmdprio_options.class[DDIR_WRITE]),
		.help = "Set asynchronous IO priority class",
		.minval = IOPRIO_MIN_PRIO_CLASS + 1,
		.maxval = IOPRIO_MAX_PRIO_CLASS,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "cmdprio",
		.lname = "Asynchronous I/O priority level",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options,
				 cmdprio_options.level[DDIR_READ]),
		.off2 = offsetof(struct ioring_options,
				 cmdprio_options.level[DDIR_WRITE]),
		.help = "Set asynchronous IO priority level",
		.minval = IOPRIO_MIN_PRIO,
		.maxval = IOPRIO_MAX_PRIO,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "cmdprio_bssplit",
		.lname = "Priority percentage block size split",
		.type = FIO_OPT_STR_STORE,
		.off1 = offsetof(struct ioring_options,
				 cmdprio_options.bssplit_str),
		.help = "Set priority percentages for different block sizes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
#else
	{
		.name = "cmdprio_percentage",
		.lname = "high priority percentage",
		.type = FIO_OPT_UNSUPPORTED,
		.help = "Your platform does not support I/O priority classes",
	},
	{
		.name = "cmdprio_class",
		.lname = "Asynchronous I/O priority class",
		.type = FIO_OPT_UNSUPPORTED,
		.help = "Your platform does not support I/O priority classes",
	},
	{
		.name = "cmdprio",
		.lname = "Asynchronous I/O priority level",
		.type = FIO_OPT_UNSUPPORTED,
		.help = "Your platform does not support I/O priority classes",
	},
	{
		.name = "cmdprio_bssplit",
		.lname = "Priority percentage block size split",
		.type = FIO_OPT_UNSUPPORTED,
		.help = "Your platform does not support I/O priority classes",
	},
#endif
	{
		.name = "fixedbufs",
		.lname = "Fixed (pre-mapped) IO buffers",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, fixedbufs),
		.help = "Pre map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "registerfiles",
		.lname = "Register file set",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, registerfiles),
		.help = "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "sqthread_poll",
		.lname = "Kernel SQ thread polling",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, sqpoll_thread),
		.help = "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "sqthread_poll_cpu",
		.lname = "SQ Thread Poll CPU",
		.type = FIO_OPT_INT,
		.cb = fio_ioring_sqpoll_cb,
		.help = "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "nonvectored",
		.lname = "Non-vectored",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, nonvectored),
		.def = "-1",
		.help = "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "uncached",
		.lname = "Uncached",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, uncached),
		.help = "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "nowait",
		.lname = "RWF_NOWAIT",
		.type = FIO_OPT_BOOL,
		.off1 = offsetof(struct ioring_options, nowait),
		.help = "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "force_async",
		.lname = "Force async",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, force_async),
		.help = "Set IOSQE_ASYNC every N requests",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = NULL,
	},
};

static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
				min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#endif
}

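/*
 * Fill in the SQE slot that corresponds to this io_u. Nothing is submitted
 * here: fio_ioring_queue() later places the SQE index in the SQ ring array,
 * and fio_ioring_commit() (or the SQPOLL kernel thread) performs the actual
 * submission.
 */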
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
		sqe->flags = 0;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on actual io_u, requeue could have
			 * adjusted these
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;
				sqe->len = 1;
			}
		}
		sqe->rw_flags = 0;
		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;

		/*
		 * Since io_uring can have a submission context (sqthread_poll)
		 * that is different from the process context, we cannot rely on
		 * the IO priority set by ioprio_set() (option prio/prioclass)
		 * to be inherited.
		 * td->ioprio will have the value of the "default prio", so set
		 * this unconditionally. This value might get overridden by
		 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
		 * cmdprio_bssplit is used.
		 */
		sqe->ioprio = td->ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		sqe->ioprio = 0;
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			sqe->off = 0;
			sqe->addr = 0;
			sqe->len = 0;
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}

	sqe->user_data = (unsigned long) io_u;
	return 0;
}

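/*
 * Turn a reaped CQE back into the io_u it completed: user_data carries the
 * io_u pointer stored in the SQE, and cqe->res is either the number of
 * bytes transferred or a negative errno.
 */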
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	} else
		io_u->error = 0;

	return io_u;
}

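/*
 * Reap completions directly from the CQ ring: read the kernel-updated tail
 * with acquire semantics, count entries up to 'max', then publish the new
 * head back to the kernel with a release store.
 */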
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		if (head == atomic_load_acquire(ring->tail))
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	if (reaped)
		atomic_store_release(ring->head, head);

	return reaped;
}

static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned events = 0;
	int r;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);
		if (r) {
			events += r;
			if (actual_min != 0)
				actual_min -= r;
			continue;
		}

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
			if (r < 0) {
				if (errno == EAGAIN || errno == EINTR)
					continue;
				td_verror(td, errno, "io_uring_enter");
				break;
			}
		}
	} while (events < min);

	return r < 0 ? r : events;
}

static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
					   struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct cmdprio *cmdprio = &ld->cmdprio;

	if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
		ld->sqes[io_u->index].ioprio = io_u->ioprio;
}

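/*
 * Queue an io_u for submission: its SQE index is written into the SQ array
 * slot at the current tail, and the tail is advanced with a release store so
 * the kernel observes a fully initialized entry. TRIM is not pushed through
 * the ring; it is completed synchronously via do_io_u_trim().
 */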
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	if (next_tail == atomic_load_acquire(ring->head))
		return FIO_Q_BUSY;

	if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
		fio_ioring_cmdprio_prep(td, io_u);

	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);

	ld->queued++;
	return FIO_Q_QUEUED;
}

static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}
}

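/*
 * Submit everything queued so far. With sqpoll_thread the kernel-side thread
 * does the submission and only needs an io_uring_enter() wakeup when the
 * IORING_SQ_NEED_WAKEUP flag is set; otherwise io_uring_enter() is called
 * until all queued entries have been consumed.
 */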
static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * Kernel side does submission. just need to check if the ring is
	 * flagged as needing a kick, if so, call io_uring_enter(). This
	 * only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned flags;

		flags = atomic_load_acquire(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}

static void fio_ioring_unmap(struct ioring_data *ld)
{
	int i;

	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
	close(ld->ring_fd);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (ld) {
		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		fio_cmdprio_cleanup(&ld->cmdprio);
		free(ld->io_u_index);
		free(ld->iovecs);
		free(ld->fds);
		free(ld);
	}
}

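/*
 * Map the three shared regions exported by io_uring_setup(): the SQ ring
 * (head/tail/flags plus the index array), the SQE array itself, and the CQ
 * ring with its CQE array. The offsets come from the io_uring_params
 * structure filled in by the kernel.
 */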
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;
	void *ptr;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, ld->ring_fd,
				IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	ld->mmap[2].len = p->cq_off.cqes +
				p->cq_entries * sizeof(struct io_uring_cqe);
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
	return 0;
}

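/*
 * If the user did not set the "nonvectored" option explicitly, use
 * IORING_REGISTER_PROBE to check whether the kernel supports the
 * non-vectored IORING_OP_READ/IORING_OP_WRITE opcodes, and enable them
 * only when both are reported as supported.
 */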
static void fio_ioring_probe(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;
	int ret;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_WRITE > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
		o->nonvectored = 1;
out:
	free(p);
}

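/*
 * Create the ring with io_uring_setup(). Polled completions (hipri) map to
 * IORING_SETUP_IOPOLL, sqthread_poll to IORING_SETUP_SQPOLL (optionally
 * pinned to a CPU), and the CQ ring is clamped to the SQ depth. If fixedbufs
 * is set, the per-io_u iovecs are registered with IORING_REGISTER_BUFFERS.
 */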
static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}
	}

	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}

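/*
 * Open all files up front and register their descriptors with
 * IORING_REGISTER_FILES, so SQEs can reference them by index through
 * IOSQE_FIXED_FILE (see fio_ioring_prep()). On error the files are really
 * closed; on success fio's per-file fd is cleared and handed back out of
 * ld->fds by fio_ioring_open_file().
 */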
static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct fio_file *f;
	unsigned int i;
	int ret;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
		if (ret)
			goto err;
		ld->fds[i] = f->fd;
		f->engine_pos = i;
	}

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
	if (ret) {
err:
		free(ld->fds);
		ld->fds = NULL;
	}

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		if (ret) {
			int fio_unused ret2;
			ret2 = generic_close_file(td, f);
		} else
			f->fd = -1;
	}

	return ret;
}

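/*
 * Runs after fio has allocated the io_u buffers: point the per-io_u iovecs
 * at those buffers, create the ring, zero the SQE array, and register files
 * if requested.
 */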
static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}

static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	int ret;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	/* io_u index */
	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
	if (ret) {
		td_verror(td, EINVAL, "fio_ioring_init");
		return 1;
	}

	return 0;
}

static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->io_u_index[io_u->index] = io_u;
	return 0;
}

static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}

static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name = "io_uring",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD,
	.init = fio_ioring_init,
	.post_init = fio_ioring_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.prep = fio_ioring_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_open_file,
	.close_file = fio_ioring_close_file,
	.get_file_size = generic_get_file_size,
	.options = options,
	.option_struct_size = sizeof(struct ioring_options),
};

static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine);
}
#endif