engines: separate declaration and assignment
[fio.git] / engines / io_uring.c
/*
 * io_uring engine
 *
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"
#include "zbd.h"
#include "nvme.h"

#include <sys/stat.h>

enum uring_cmd_type {
	FIO_URING_CMD_NVME = 1,
};

struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

struct ioring_mmap {
	void *ptr;
	size_t len;
};

struct ioring_data {
	int ring_fd;

	struct io_u **io_u_index;

	int *fds;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec *iovecs;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	int queued;
	int cq_ring_off;
	unsigned iodepth;
	int prepped;

	struct ioring_mmap mmap[3];

	struct cmdprio cmdprio;
};

struct ioring_options {
	struct thread_data *td;
	unsigned int hipri;
	struct cmdprio_options cmdprio_options;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int nowait;
	unsigned int force_async;
	enum uring_cmd_type cmd_type;
};

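/*
 * Map a fio data direction to the io_uring opcode to use, for the vectored
 * and non-vectored variants, and for fixed (pre-registered) buffers.
 */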
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED
};

static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
	o->sqpoll_set = 1;
	return 0;
}

static struct fio_option options[] = {
	{
		.name = "hipri",
		.lname = "High Priority",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, hipri),
		.help = "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
#ifdef FIO_HAVE_IOPRIO_CLASS
	{
		.name = "cmdprio_percentage",
		.lname = "high priority percentage",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options,
				 cmdprio_options.percentage[DDIR_READ]),
		.off2 = offsetof(struct ioring_options,
				 cmdprio_options.percentage[DDIR_WRITE]),
		.minval = 0,
		.maxval = 100,
		.help = "Send high priority I/O this percentage of the time",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "cmdprio_class",
		.lname = "Asynchronous I/O priority class",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options,
				 cmdprio_options.class[DDIR_READ]),
		.off2 = offsetof(struct ioring_options,
				 cmdprio_options.class[DDIR_WRITE]),
		.help = "Set asynchronous IO priority class",
		.minval = IOPRIO_MIN_PRIO_CLASS + 1,
		.maxval = IOPRIO_MAX_PRIO_CLASS,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "cmdprio",
		.lname = "Asynchronous I/O priority level",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options,
				 cmdprio_options.level[DDIR_READ]),
		.off2 = offsetof(struct ioring_options,
				 cmdprio_options.level[DDIR_WRITE]),
		.help = "Set asynchronous IO priority level",
		.minval = IOPRIO_MIN_PRIO,
		.maxval = IOPRIO_MAX_PRIO,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "cmdprio_bssplit",
		.lname = "Priority percentage block size split",
		.type = FIO_OPT_STR_STORE,
		.off1 = offsetof(struct ioring_options,
				 cmdprio_options.bssplit_str),
		.help = "Set priority percentages for different block sizes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
#else
	{
		.name = "cmdprio_percentage",
		.lname = "high priority percentage",
		.type = FIO_OPT_UNSUPPORTED,
		.help = "Your platform does not support I/O priority classes",
	},
	{
		.name = "cmdprio_class",
		.lname = "Asynchronous I/O priority class",
		.type = FIO_OPT_UNSUPPORTED,
		.help = "Your platform does not support I/O priority classes",
	},
	{
		.name = "cmdprio",
		.lname = "Asynchronous I/O priority level",
		.type = FIO_OPT_UNSUPPORTED,
		.help = "Your platform does not support I/O priority classes",
	},
	{
		.name = "cmdprio_bssplit",
		.lname = "Priority percentage block size split",
		.type = FIO_OPT_UNSUPPORTED,
		.help = "Your platform does not support I/O priority classes",
	},
#endif
	{
		.name = "fixedbufs",
		.lname = "Fixed (pre-mapped) IO buffers",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, fixedbufs),
		.help = "Pre map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "registerfiles",
		.lname = "Register file set",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, registerfiles),
		.help = "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "sqthread_poll",
		.lname = "Kernel SQ thread polling",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, sqpoll_thread),
		.help = "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "sqthread_poll_cpu",
		.lname = "SQ Thread Poll CPU",
		.type = FIO_OPT_INT,
		.cb = fio_ioring_sqpoll_cb,
		.help = "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "nonvectored",
		.lname = "Non-vectored",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, nonvectored),
		.def = "-1",
		.help = "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "uncached",
		.lname = "Uncached",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, uncached),
		.help = "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "nowait",
		.lname = "RWF_NOWAIT",
		.type = FIO_OPT_BOOL,
		.off1 = offsetof(struct ioring_options, nowait),
		.help = "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "force_async",
		.lname = "Force async",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, force_async),
		.help = "Set IOSQE_ASYNC every N requests",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "cmd_type",
		.lname = "Uring cmd type",
		.type = FIO_OPT_STR,
		.off1 = offsetof(struct ioring_options, cmd_type),
		.help = "Specify uring-cmd type",
		.def = "nvme",
		.posval = {
			  { .ival = "nvme",
			    .oval = FIO_URING_CMD_NVME,
			    .help = "Issue nvme-uring-cmd",
			  },
		},
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = NULL,
	},
};

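/*
 * Thin wrapper around the io_uring_enter(2) system call, using the direct
 * syscall path where the architecture provides one.
 */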
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
				min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#endif
}

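/*
 * Fill in the SQE slot matching this io_u's index: pick the opcode based on
 * the data direction and the fixedbufs/nonvectored options, and carry the
 * io_u pointer in user_data so completions can be matched back up.
 */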
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
		sqe->flags = 0;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on actual io_u, requeue could have
			 * adjusted these
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;
				sqe->len = 1;
			}
		}
		sqe->rw_flags = 0;
		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;

		/*
		 * Since io_uring can have a submission context (sqthread_poll)
		 * that is different from the process context, we cannot rely on
		 * the IO priority set by ioprio_set() (option prio/prioclass)
		 * to be inherited.
		 * td->ioprio will have the value of the "default prio", so set
		 * this unconditionally. This value might get overridden by
		 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
		 * cmdprio_bssplit is used.
		 */
		sqe->ioprio = td->ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		sqe->ioprio = 0;
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			sqe->off = 0;
			sqe->addr = 0;
			sqe->len = 0;
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}

	sqe->user_data = (unsigned long) io_u;
	return 0;
}

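/*
 * Prep for the io_uring_cmd (NVMe passthrough) engine: with SQE128 each
 * SQE is twice the normal size, hence the index << 1 below.
 */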
static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;

	/* only supports nvme_uring_cmd */
	if (o->cmd_type != FIO_URING_CMD_NVME)
		return -EINVAL;

	if (io_u->ddir == DDIR_TRIM)
		return 0;

	sqe = &ld->sqes[(io_u->index) << 1];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
	}
	sqe->rw_flags = 0;
	if (!td->o.odirect && o->uncached)
		sqe->rw_flags |= RWF_UNCACHED;
	if (o->nowait)
		sqe->rw_flags |= RWF_NOWAIT;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) io_u;
	if (o->nonvectored)
		sqe->cmd_op = NVME_URING_CMD_IO;
	else
		sqe->cmd_op = NVME_URING_CMD_IO_VEC;
	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}
	if (o->fixedbufs) {
		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = io_u->index;
	}

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	return fio_nvme_uring_cmd_prep(cmd, io_u,
			o->nonvectored ? NULL : &ld->iovecs[io_u->index]);
}

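/*
 * Map a reaped completion back to its io_u via the CQE user_data and turn
 * the CQE result into either a residual count or an error.
 */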
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	} else
		io_u->error = 0;

	return io_u;
}

static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
	if (o->cmd_type == FIO_URING_CMD_NVME)
		index <<= 1;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != 0)
		io_u->error = -cqe->res;
	else
		io_u->error = 0;

	return io_u;
}

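/*
 * Walk the CQ ring and count new completions, up to "max". The tail is read
 * with acquire semantics and the head published with release semantics, to
 * pair with the kernel's updates on the other side of the ring.
 */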
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		if (head == atomic_load_acquire(ring->tail))
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	if (reaped)
		atomic_store_release(ring->head, head);

	return reaped;
}

static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned events = 0;
	int r;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);
		if (r) {
			events += r;
			max -= r;
			if (actual_min != 0)
				actual_min -= r;
			continue;
		}

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
			if (r < 0) {
				if (errno == EAGAIN || errno == EINTR)
					continue;
				r = -errno;
				td_verror(td, errno, "io_uring_enter");
				break;
			}
		}
	} while (events < min);

	return r < 0 ? r : events;
}

static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
					   struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct cmdprio *cmdprio = &ld->cmdprio;

	if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
		ld->sqes[io_u->index].ioprio = io_u->ioprio;
}

static int fio_ioring_cmd_io_u_trim(const struct thread_data *td,
				    struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	if (td->o.zone_mode == ZONE_MODE_ZBD) {
		ret = zbd_do_io_u_trim(td, io_u);
		if (ret == io_u_completed)
			return io_u->xfer_buflen;
		if (ret)
			goto err;
	}

	return fio_nvme_trim(td, f, io_u->offset, io_u->xfer_buflen);

err:
	io_u->error = ret;
	return 0;
}

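/*
 * Queue one io_u: trims are completed inline (synchronously), everything
 * else has its index placed in the SQ ring array and the tail advanced.
 * The actual io_uring_enter() happens later, in fio_ioring_commit().
 */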
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		if (!strcmp(td->io_ops->name, "io_uring_cmd"))
			fio_ioring_cmd_io_u_trim(td, io_u);
		else
			do_io_u_trim(td, io_u);

		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	if (next_tail == atomic_load_acquire(ring->head))
		return FIO_Q_BUSY;

	if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
		fio_ioring_cmdprio_prep(td, io_u);

	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);

	ld->queued++;
	return FIO_Q_QUEUED;
}

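/*
 * Stamp issue times on the io_us that were just submitted, walking the SQ
 * ring array from "start" for "nr" entries.
 */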
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}

	/*
	 * only used for iolog
	 */
	if (td->o.read_iolog_file)
		memcpy(&td->last_issue, &now, sizeof(now));
}

static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * Kernel side does submission. Just need to check if the ring is
	 * flagged as needing a kick, if so, call io_uring_enter(). This
	 * only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned start = *ld->sq_ring.head;
		unsigned flags;

		flags = atomic_load_acquire(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		fio_ioring_queued(td, start, ld->queued);
		io_u_mark_submit(td, ld->queued);

		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			ret = -errno;
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}

static void fio_ioring_unmap(struct ioring_data *ld)
{
	int i;

	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
	close(ld->ring_fd);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (ld) {
		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		fio_cmdprio_cleanup(&ld->cmdprio);
		free(ld->io_u_index);
		free(ld->iovecs);
		free(ld->fds);
		free(ld);
	}
}

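/*
 * mmap the three io_uring regions (SQ ring, SQE array, CQ ring) and record
 * the user-space pointers to the individual ring fields.
 */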
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;
	void *ptr;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	if (p->flags & IORING_SETUP_SQE128)
		ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
	else
		ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, ld->ring_fd,
				IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	if (p->flags & IORING_SETUP_CQE32) {
		ld->mmap[2].len = p->cq_off.cqes +
					2 * p->cq_entries * sizeof(struct io_uring_cqe);
	} else {
		ld->mmap[2].len = p->cq_off.cqes +
					p->cq_entries * sizeof(struct io_uring_cqe);
	}
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
	return 0;
}

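/*
 * Probe the kernel for IORING_OP_READ/IORING_OP_WRITE support so that
 * nonvectored can default to on where available, unless the user already
 * set it explicitly.
 */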
static void fio_ioring_probe(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;
	int ret;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_WRITE > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
		o->nonvectored = 1;
out:
	free(p);
}

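/*
 * Set up the ring with the optional setup flags, dropping the newer ones
 * (DEFER_TASKRUN/SINGLE_ISSUER, COOP_TASKRUN, CQSIZE) one by one and
 * retrying if the running kernel rejects them with EINVAL.
 */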
static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, and any syscall if
		 * IORING_SQ_NEED_WAKEUP is set, we don't need to log that time
		 * separately.
		 */
		td->o.disable_slat = 1;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Setup COOP_TASKRUN as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}

static int fio_ioring_cmd_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, and any syscall if
		 * IORING_SQ_NEED_WAKEUP is set, we don't need to log that time
		 * separately.
		 */
		td->o.disable_slat = 1;
	}
	if (o->cmd_type == FIO_URING_CMD_NVME) {
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Setup COOP_TASKRUN as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}

static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct fio_file *f;
	unsigned int i;
	int ret;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
		if (ret)
			goto err;
		ld->fds[i] = f->fd;
		f->engine_pos = i;
	}

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
	if (ret) {
err:
		free(ld->fds);
		ld->fds = NULL;
	}

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		if (ret) {
			int fio_unused ret2;
			ret2 = generic_close_file(td, f);
		} else
			f->fd = -1;
	}

	return ret;
}

static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}

static int fio_ioring_cmd_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_cmd_queue_init(td);
	if (err) {
		int init_err = errno;

		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		if (o->cmd_type == FIO_URING_CMD_NVME) {
			sqe = &ld->sqes[i << 1];
			memset(sqe, 0, 2 * sizeof(*sqe));
		} else {
			sqe = &ld->sqes[i];
			memset(sqe, 0, sizeof(*sqe));
		}
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}

static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	int ret;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	/* io_u index */
	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
	if (ret) {
		td_verror(td, EINVAL, "fio_ioring_init");
		return 1;
	}

	return 0;
}

static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->io_u_index[io_u->index] = io_u;
	return 0;
}

static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}

static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;
		__u64 nlba = 0;
		int ret;

		/* Store the namespace-id and lba size. */
		data = FILE_ENG_DATA(f);
		if (data == NULL) {
			ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
			if (ret)
				return ret;

			data = calloc(1, sizeof(struct nvme_data));
			data->nsid = nsid;
			data->lba_shift = ilog2(lba_size);

			FILE_SET_ENG_DATA(f, data);
		}
	}
	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}

static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}

static int fio_ioring_cmd_close_file(struct thread_data *td,
				     struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = FILE_ENG_DATA(f);

		FILE_SET_ENG_DATA(f, NULL);
		free(data);
	}
	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}

static int fio_ioring_cmd_get_file_size(struct thread_data *td,
					struct fio_file *f)
{
	struct ioring_options *o = td->eo;

	if (fio_file_size_known(f))
		return 0;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;
		__u64 nlba = 0;
		int ret;

		ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
		if (ret)
			return ret;

		data = calloc(1, sizeof(struct nvme_data));
		data->nsid = nsid;
		data->lba_shift = ilog2(lba_size);

		f->real_file_size = lba_size * nlba;
		fio_file_set_size_known(f);

		FILE_SET_ENG_DATA(f, data);
		return 0;
	}
	return generic_get_file_size(td, f);
}

static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
					  struct fio_file *f,
					  enum zbd_zoned_model *model)
{
	return fio_nvme_get_zoned_model(td, f, model);
}

static int fio_ioring_cmd_report_zones(struct thread_data *td,
				       struct fio_file *f, uint64_t offset,
				       struct zbd_zone *zbdz,
				       unsigned int nr_zones)
{
	return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
}

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
				   uint64_t offset, uint64_t length)
{
	return fio_nvme_reset_wp(td, f, offset, length);
}

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
					     struct fio_file *f,
					     unsigned int *max_open_zones)
{
	return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}

static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
				     struct fio_ruhs_info *fruhs_info)
{
	struct nvme_fdp_ruh_status *ruhs;
	int bytes, ret, i;

	bytes = sizeof(*ruhs) + 128 * sizeof(struct nvme_fdp_ruh_status_desc);
	ruhs = scalloc(1, bytes);
	if (!ruhs)
		return -ENOMEM;

	ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);
	if (ret)
		goto free;

	fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
	for (i = 0; i < fruhs_info->nr_ruhs; i++)
		fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
free:
	sfree(ruhs);
	return ret;
}

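/*
 * Two engines are registered from this file: "io_uring" for regular file
 * and block device I/O, and "io_uring_cmd" for NVMe uring passthrough
 * commands.
 */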
static struct ioengine_ops ioengine_uring = {
	.name = "io_uring",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
			FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init = fio_ioring_init,
	.post_init = fio_ioring_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.prep = fio_ioring_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_open_file,
	.close_file = fio_ioring_close_file,
	.get_file_size = generic_get_file_size,
	.options = options,
	.option_struct_size = sizeof(struct ioring_options),
};

static struct ioengine_ops ioengine_uring_cmd = {
	.name = "io_uring_cmd",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
			FIO_MEMALIGN | FIO_RAWIO |
			FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init = fio_ioring_init,
	.post_init = fio_ioring_cmd_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.prep = fio_ioring_cmd_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_cmd_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_cmd_open_file,
	.close_file = fio_ioring_cmd_close_file,
	.get_file_size = fio_ioring_cmd_get_file_size,
	.get_zoned_model = fio_ioring_cmd_get_zoned_model,
	.report_zones = fio_ioring_cmd_report_zones,
	.reset_wp = fio_ioring_cmd_reset_wp,
	.get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
	.options = options,
	.option_struct_size = sizeof(struct ioring_options),
	.fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,
};

static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine_uring);
	register_ioengine(&ioengine_uring_cmd);
}

static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine_uring);
	unregister_ioengine(&ioengine_uring_cmd);
}
#endif