docs: fix operations misspellings
[fio.git] / engines / io_uring.c
1/*
2 * io_uring engine
3 *
4 * IO engine using the new native Linux aio io_uring interface. See:
5 *
6 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
7 *
8 */
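/*
 * Illustrative only: a minimal fio job using this engine might look like the
 * snippet below. The device path and option values are examples, not part of
 * this source; the engine-specific options (fixedbufs, registerfiles,
 * sqthread_poll, ...) are defined in the options[] table further down.
 *
 *	[uring-randread]
 *	ioengine=io_uring
 *	filename=/dev/nvme0n1
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 *	fixedbufs=1
 *	registerfiles=1
 *	sqthread_poll=1
 */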
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <sys/time.h>
13#include <sys/resource.h>
14
15#include "../fio.h"
16#include "../lib/pow2.h"
17#include "../optgroup.h"
18#include "../lib/memalign.h"
19#include "../lib/fls.h"
20#include "../lib/roundup.h"
21#include "../verify.h"
22
23#ifdef ARCH_HAVE_IOURING
24
25#include "../lib/types.h"
26#include "../os/linux/io_uring.h"
27#include "cmdprio.h"
28#include "zbd.h"
29#include "nvme.h"
30
31#include <sys/stat.h>
32
33enum uring_cmd_type {
34 FIO_URING_CMD_NVME = 1,
35};
36
37enum uring_cmd_write_mode {
38 FIO_URING_CMD_WMODE_WRITE = 1,
39 FIO_URING_CMD_WMODE_UNCOR,
40 FIO_URING_CMD_WMODE_ZEROES,
41 FIO_URING_CMD_WMODE_VERIFY,
42};
43
44struct io_sq_ring {
45 unsigned *head;
46 unsigned *tail;
47 unsigned *ring_mask;
48 unsigned *ring_entries;
49 unsigned *flags;
50 unsigned *array;
51};
52
53struct io_cq_ring {
54 unsigned *head;
55 unsigned *tail;
56 unsigned *ring_mask;
57 unsigned *ring_entries;
58 struct io_uring_cqe *cqes;
59};
60
61struct ioring_mmap {
62 void *ptr;
63 size_t len;
64};
65
66struct ioring_data {
67 int ring_fd;
68
69 struct io_u **io_u_index;
70 char *md_buf;
71
72 int *fds;
73
74 struct io_sq_ring sq_ring;
75 struct io_uring_sqe *sqes;
76 struct iovec *iovecs;
77 unsigned sq_ring_mask;
78
79 struct io_cq_ring cq_ring;
80 unsigned cq_ring_mask;
81
82 int queued;
83 int cq_ring_off;
84 unsigned iodepth;
85 int prepped;
86
87 struct ioring_mmap mmap[3];
88
89 struct cmdprio cmdprio;
90
91 struct nvme_dsm *dsm;
92 uint32_t cdw12_flags[DDIR_RWDIR_CNT];
93 uint8_t write_opcode;
94};
95
96struct ioring_options {
97 struct thread_data *td;
98 unsigned int hipri;
99 unsigned int readfua;
100 unsigned int writefua;
101 unsigned int write_mode;
102 struct cmdprio_options cmdprio_options;
103 unsigned int fixedbufs;
104 unsigned int registerfiles;
105 unsigned int sqpoll_thread;
106 unsigned int sqpoll_set;
107 unsigned int sqpoll_cpu;
108 unsigned int nonvectored;
109 unsigned int uncached;
110 unsigned int nowait;
111 unsigned int force_async;
112 unsigned int md_per_io_size;
113 unsigned int pi_act;
114 unsigned int apptag;
115 unsigned int apptag_mask;
116 unsigned int prchk;
117 char *pi_chk;
118 enum uring_cmd_type cmd_type;
119};
120
121static const int ddir_to_op[2][2] = {
122 { IORING_OP_READV, IORING_OP_READ },
123 { IORING_OP_WRITEV, IORING_OP_WRITE }
124};
125
126static const int fixed_ddir_to_op[2] = {
127 IORING_OP_READ_FIXED,
128 IORING_OP_WRITE_FIXED
129};
130
131static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
132{
133 struct ioring_options *o = data;
134
135 o->sqpoll_cpu = *val;
136 o->sqpoll_set = 1;
137 return 0;
138}
139
140static struct fio_option options[] = {
141 {
142 .name = "hipri",
143 .lname = "High Priority",
144 .type = FIO_OPT_STR_SET,
145 .off1 = offsetof(struct ioring_options, hipri),
146 .help = "Use polled IO completions",
147 .category = FIO_OPT_C_ENGINE,
148 .group = FIO_OPT_G_IOURING,
149 },
150 {
151 .name = "readfua",
152 .lname = "Read fua flag support",
153 .type = FIO_OPT_BOOL,
154 .off1 = offsetof(struct ioring_options, readfua),
155 .help = "Set FUA flag (force unit access) for all Read operations",
156 .def = "0",
157 .category = FIO_OPT_C_ENGINE,
158 .group = FIO_OPT_G_IOURING,
159 },
160 {
161 .name = "writefua",
162 .lname = "Write fua flag support",
163 .type = FIO_OPT_BOOL,
164 .off1 = offsetof(struct ioring_options, writefua),
165 .help = "Set FUA flag (force unit access) for all Write operations",
166 .def = "0",
167 .category = FIO_OPT_C_ENGINE,
168 .group = FIO_OPT_G_IOURING,
169 },
170 {
171 .name = "write_mode",
172 .lname = "Additional Write commands support (Write Uncorrectable, Write Zeroes)",
173 .type = FIO_OPT_STR,
174 .off1 = offsetof(struct ioring_options, write_mode),
175 .help = "Issue Write Uncorrectable or Zeroes command instead of Write command",
176 .def = "write",
177 .posval = {
178 { .ival = "write",
179 .oval = FIO_URING_CMD_WMODE_WRITE,
180 .help = "Issue Write commands for write operations"
181 },
182 { .ival = "uncor",
183 .oval = FIO_URING_CMD_WMODE_UNCOR,
184 .help = "Issue Write Uncorrectable commands for write operations"
185 },
186 { .ival = "zeroes",
187 .oval = FIO_URING_CMD_WMODE_ZEROES,
188 .help = "Issue Write Zeroes commands for write operations"
189 },
190 { .ival = "verify",
191 .oval = FIO_URING_CMD_WMODE_VERIFY,
192 .help = "Issue Verify commands for write operations"
193 },
194 },
195 .category = FIO_OPT_C_ENGINE,
196 .group = FIO_OPT_G_IOURING,
197 },
198 {
199 .name = "fixedbufs",
200 .lname = "Fixed (pre-mapped) IO buffers",
201 .type = FIO_OPT_STR_SET,
202 .off1 = offsetof(struct ioring_options, fixedbufs),
203 .help = "Pre-map IO buffers",
204 .category = FIO_OPT_C_ENGINE,
205 .group = FIO_OPT_G_IOURING,
206 },
207 {
208 .name = "registerfiles",
209 .lname = "Register file set",
210 .type = FIO_OPT_STR_SET,
211 .off1 = offsetof(struct ioring_options, registerfiles),
212 .help = "Pre-open/register files",
213 .category = FIO_OPT_C_ENGINE,
214 .group = FIO_OPT_G_IOURING,
215 },
216 {
217 .name = "sqthread_poll",
218 .lname = "Kernel SQ thread polling",
219 .type = FIO_OPT_STR_SET,
220 .off1 = offsetof(struct ioring_options, sqpoll_thread),
221 .help = "Offload submission/completion to kernel thread",
222 .category = FIO_OPT_C_ENGINE,
223 .group = FIO_OPT_G_IOURING,
224 },
225 {
226 .name = "sqthread_poll_cpu",
227 .lname = "SQ Thread Poll CPU",
228 .type = FIO_OPT_INT,
229 .cb = fio_ioring_sqpoll_cb,
230 .help = "What CPU to run SQ thread polling on",
231 .category = FIO_OPT_C_ENGINE,
232 .group = FIO_OPT_G_IOURING,
233 },
234 {
235 .name = "nonvectored",
236 .lname = "Non-vectored",
237 .type = FIO_OPT_INT,
238 .off1 = offsetof(struct ioring_options, nonvectored),
239 .def = "-1",
240 .help = "Use non-vectored read/write commands",
241 .category = FIO_OPT_C_ENGINE,
242 .group = FIO_OPT_G_IOURING,
243 },
244 {
245 .name = "uncached",
246 .lname = "Uncached",
247 .type = FIO_OPT_INT,
248 .off1 = offsetof(struct ioring_options, uncached),
249 .help = "Use RWF_UNCACHED for buffered read/writes",
250 .category = FIO_OPT_C_ENGINE,
251 .group = FIO_OPT_G_IOURING,
252 },
253 {
254 .name = "nowait",
255 .lname = "RWF_NOWAIT",
256 .type = FIO_OPT_BOOL,
257 .off1 = offsetof(struct ioring_options, nowait),
258 .help = "Use RWF_NOWAIT for reads/writes",
259 .category = FIO_OPT_C_ENGINE,
260 .group = FIO_OPT_G_IOURING,
261 },
262 {
263 .name = "force_async",
264 .lname = "Force async",
265 .type = FIO_OPT_INT,
266 .off1 = offsetof(struct ioring_options, force_async),
267 .help = "Set IOSQE_ASYNC every N requests",
268 .category = FIO_OPT_C_ENGINE,
269 .group = FIO_OPT_G_IOURING,
270 },
271 {
272 .name = "cmd_type",
273 .lname = "Uring cmd type",
274 .type = FIO_OPT_STR,
275 .off1 = offsetof(struct ioring_options, cmd_type),
276 .help = "Specify uring-cmd type",
277 .def = "nvme",
278 .posval = {
279 { .ival = "nvme",
280 .oval = FIO_URING_CMD_NVME,
281 .help = "Issue nvme-uring-cmd",
282 },
283 },
284 .category = FIO_OPT_C_ENGINE,
285 .group = FIO_OPT_G_IOURING,
286 },
287 CMDPRIO_OPTIONS(struct ioring_options, FIO_OPT_G_IOURING),
288 {
289 .name = "md_per_io_size",
290 .lname = "Separate Metadata Buffer Size per I/O",
291 .type = FIO_OPT_INT,
292 .off1 = offsetof(struct ioring_options, md_per_io_size),
293 .def = "0",
294 .help = "Size of separate metadata buffer per I/O (Default: 0)",
295 .category = FIO_OPT_C_ENGINE,
296 .group = FIO_OPT_G_IOURING,
297 },
298 {
299 .name = "pi_act",
300 .lname = "Protection Information Action",
301 .type = FIO_OPT_BOOL,
302 .off1 = offsetof(struct ioring_options, pi_act),
303 .def = "1",
304 .help = "Protection Information Action bit (pi_act=1 or pi_act=0)",
305 .category = FIO_OPT_C_ENGINE,
306 .group = FIO_OPT_G_IOURING,
307 },
308 {
309 .name = "pi_chk",
310 .lname = "Protection Information Check",
311 .type = FIO_OPT_STR_STORE,
312 .off1 = offsetof(struct ioring_options, pi_chk),
313 .def = NULL,
314 .help = "Control of Protection Information Checking (pi_chk=GUARD,REFTAG,APPTAG)",
315 .category = FIO_OPT_C_ENGINE,
316 .group = FIO_OPT_G_IOURING,
317 },
318 {
319 .name = "apptag",
320 .lname = "Application Tag used in Protection Information",
321 .type = FIO_OPT_INT,
322 .off1 = offsetof(struct ioring_options, apptag),
323 .def = "0x1234",
324 .help = "Application Tag used in Protection Information field (Default: 0x1234)",
325 .category = FIO_OPT_C_ENGINE,
326 .group = FIO_OPT_G_IOURING,
327 },
328 {
329 .name = "apptag_mask",
330 .lname = "Application Tag Mask",
331 .type = FIO_OPT_INT,
332 .off1 = offsetof(struct ioring_options, apptag_mask),
333 .def = "0xffff",
334 .help = "Application Tag Mask used with Application Tag (Default: 0xffff)",
335 .category = FIO_OPT_C_ENGINE,
336 .group = FIO_OPT_G_IOURING,
337 },
338 {
339 .name = NULL,
340 },
341};
342
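/*
 * Thin wrapper around the io_uring_enter(2) syscall; used both to submit
 * SQ entries and to wait for completions, depending on the flags passed.
 */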
343static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
344 unsigned int min_complete, unsigned int flags)
345{
346#ifdef FIO_ARCH_HAS_SYSCALL
347 return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
348 min_complete, flags, NULL, 0);
349#else
350 return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
351 min_complete, flags, NULL, 0);
352#endif
353}
354
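/*
 * Fill in the SQE for a regular read/write/sync io_u. Called once per I/O
 * before submission; handles fixed buffers, vectored vs. non-vectored
 * opcodes, and the fsync/sync_file_range variants.
 */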
355static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
356{
357 struct ioring_data *ld = td->io_ops_data;
358 struct ioring_options *o = td->eo;
359 struct fio_file *f = io_u->file;
360 struct io_uring_sqe *sqe;
361
362 sqe = &ld->sqes[io_u->index];
363
364 if (o->registerfiles) {
365 sqe->fd = f->engine_pos;
366 sqe->flags = IOSQE_FIXED_FILE;
367 } else {
368 sqe->fd = f->fd;
369 sqe->flags = 0;
370 }
371
372 if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
373 if (o->fixedbufs) {
374 sqe->opcode = fixed_ddir_to_op[io_u->ddir];
375 sqe->addr = (unsigned long) io_u->xfer_buf;
376 sqe->len = io_u->xfer_buflen;
377 sqe->buf_index = io_u->index;
378 } else {
379 struct iovec *iov = &ld->iovecs[io_u->index];
380
381 /*
382 * Update based on the actual io_u; a requeue could have
383 * adjusted these
384 */
385 iov->iov_base = io_u->xfer_buf;
386 iov->iov_len = io_u->xfer_buflen;
387
388 sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
389 if (o->nonvectored) {
390 sqe->addr = (unsigned long) iov->iov_base;
391 sqe->len = iov->iov_len;
392 } else {
393 sqe->addr = (unsigned long) iov;
394 sqe->len = 1;
395 }
396 }
397 sqe->rw_flags = 0;
398 if (!td->o.odirect && o->uncached)
399 sqe->rw_flags |= RWF_UNCACHED;
400 if (o->nowait)
401 sqe->rw_flags |= RWF_NOWAIT;
402
403 /*
404 * Since io_uring can have a submission context (sqthread_poll)
405 * that is different from the process context, we cannot rely on
406 * the IO priority set by ioprio_set() (options prio, prioclass,
407 * and priohint) to be inherited.
408 * td->ioprio will have the value of the "default prio", so set
409 * this unconditionally. This value might get overridden by
410 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
411 * cmdprio_bssplit is used.
412 */
413 sqe->ioprio = td->ioprio;
414 sqe->off = io_u->offset;
415 } else if (ddir_sync(io_u->ddir)) {
416 sqe->ioprio = 0;
417 if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
418 sqe->off = f->first_write;
419 sqe->len = f->last_write - f->first_write;
420 sqe->sync_range_flags = td->o.sync_file_range;
421 sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
422 } else {
423 sqe->off = 0;
424 sqe->addr = 0;
425 sqe->len = 0;
426 if (io_u->ddir == DDIR_DATASYNC)
427 sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
428 sqe->opcode = IORING_OP_FSYNC;
429 }
430 }
431
432 if (o->force_async && ++ld->prepped == o->force_async) {
433 ld->prepped = 0;
434 sqe->flags |= IOSQE_ASYNC;
435 }
436
437 sqe->user_data = (unsigned long) io_u;
438 return 0;
439}
440
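/*
 * SQE preparation for the io_uring_cmd (NVMe passthrough) path. Each io_u
 * uses two SQE slots because the ring is created with IORING_SETUP_SQE128;
 * the actual NVMe command is built by fio_nvme_uring_cmd_prep().
 */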
441static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
442{
443 struct ioring_data *ld = td->io_ops_data;
444 struct ioring_options *o = td->eo;
445 struct fio_file *f = io_u->file;
446 struct nvme_uring_cmd *cmd;
447 struct io_uring_sqe *sqe;
448 struct nvme_dsm *dsm;
449 void *ptr = ld->dsm;
450 unsigned int dsm_size;
451
452 /* only supports nvme_uring_cmd */
453 if (o->cmd_type != FIO_URING_CMD_NVME)
454 return -EINVAL;
455
456 if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM)
457 return 0;
458
459 sqe = &ld->sqes[(io_u->index) << 1];
460
461 if (o->registerfiles) {
462 sqe->fd = f->engine_pos;
463 sqe->flags = IOSQE_FIXED_FILE;
464 } else {
465 sqe->fd = f->fd;
466 }
467 sqe->rw_flags = 0;
468 if (!td->o.odirect && o->uncached)
469 sqe->rw_flags |= RWF_UNCACHED;
470 if (o->nowait)
471 sqe->rw_flags |= RWF_NOWAIT;
472
473 sqe->opcode = IORING_OP_URING_CMD;
474 sqe->user_data = (unsigned long) io_u;
475 if (o->nonvectored)
476 sqe->cmd_op = NVME_URING_CMD_IO;
477 else
478 sqe->cmd_op = NVME_URING_CMD_IO_VEC;
479 if (o->force_async && ++ld->prepped == o->force_async) {
480 ld->prepped = 0;
481 sqe->flags |= IOSQE_ASYNC;
482 }
483 if (o->fixedbufs) {
484 sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
485 sqe->buf_index = io_u->index;
486 }
487
488 cmd = (struct nvme_uring_cmd *)sqe->cmd;
489 dsm_size = sizeof(*ld->dsm) + td->o.num_range * sizeof(struct nvme_dsm_range);
490 ptr += io_u->index * dsm_size;
491 dsm = (struct nvme_dsm *)ptr;
492
493 return fio_nvme_uring_cmd_prep(cmd, io_u,
494 o->nonvectored ? NULL : &ld->iovecs[io_u->index],
495 dsm, ld->write_opcode, ld->cdw12_flags[io_u->ddir]);
496}
497
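/*
 * Translate a completed CQE back into its io_u, setting the error or the
 * residual byte count from cqe->res.
 */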
498static struct io_u *fio_ioring_event(struct thread_data *td, int event)
499{
500 struct ioring_data *ld = td->io_ops_data;
501 struct io_uring_cqe *cqe;
502 struct io_u *io_u;
503 unsigned index;
504
505 index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
506
507 cqe = &ld->cq_ring.cqes[index];
508 io_u = (struct io_u *) (uintptr_t) cqe->user_data;
509
510 if (cqe->res != io_u->xfer_buflen) {
511 if (cqe->res > io_u->xfer_buflen)
512 io_u->error = -cqe->res;
513 else
514 io_u->resid = io_u->xfer_buflen - cqe->res;
515 } else
516 io_u->error = 0;
517
518 return io_u;
519}
520
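/*
 * Completion handling for the passthrough path. CQEs are 32 bytes wide
 * (IORING_SETUP_CQE32), hence the doubled index, and reads may need a
 * protection information verify when pi_act is not set.
 */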
521static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
522{
523 struct ioring_data *ld = td->io_ops_data;
524 struct ioring_options *o = td->eo;
525 struct io_uring_cqe *cqe;
526 struct io_u *io_u;
527 struct nvme_data *data;
528 unsigned index;
529 int ret;
530
531 index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
532 if (o->cmd_type == FIO_URING_CMD_NVME)
533 index <<= 1;
534
535 cqe = &ld->cq_ring.cqes[index];
536 io_u = (struct io_u *) (uintptr_t) cqe->user_data;
537
538 if (cqe->res != 0) {
539 io_u->error = abs(cqe->res);
540 return io_u;
541 } else {
542 io_u->error = 0;
543 }
544
545 if (o->cmd_type == FIO_URING_CMD_NVME) {
546 data = FILE_ENG_DATA(io_u->file);
547 if (data->pi_type && (io_u->ddir == DDIR_READ) && !o->pi_act) {
548 ret = fio_nvme_pi_verify(data, io_u);
549 if (ret)
550 io_u->error = ret;
551 }
552 }
553
554 return io_u;
555}
556
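/*
 * Reap up to 'max - events' completions straight off the CQ ring without
 * entering the kernel.
 */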
557static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
558 unsigned int max)
559{
560 struct ioring_data *ld = td->io_ops_data;
561 struct io_cq_ring *ring = &ld->cq_ring;
562 unsigned head, reaped = 0;
563
564 head = *ring->head;
565 do {
566 if (head == atomic_load_acquire(ring->tail))
567 break;
568 reaped++;
569 head++;
570 } while (reaped + events < max);
571
572 if (reaped)
573 atomic_store_release(ring->head, head);
574
575 return reaped;
576}
577
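/*
 * Wait for at least 'min' completions, reaping directly off the CQ ring and
 * falling back to io_uring_enter(GETEVENTS) unless sqthread_poll is in use.
 */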
578static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
579 unsigned int max, const struct timespec *t)
580{
581 struct ioring_data *ld = td->io_ops_data;
582 unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
583 struct ioring_options *o = td->eo;
584 struct io_cq_ring *ring = &ld->cq_ring;
585 unsigned events = 0;
586 int r;
587
588 ld->cq_ring_off = *ring->head;
589 do {
590 r = fio_ioring_cqring_reap(td, events, max);
591 if (r) {
592 events += r;
593 max -= r;
594 if (actual_min != 0)
595 actual_min -= r;
596 continue;
597 }
598
599 if (!o->sqpoll_thread) {
600 r = io_uring_enter(ld, 0, actual_min,
601 IORING_ENTER_GETEVENTS);
602 if (r < 0) {
603 if (errno == EAGAIN || errno == EINTR)
604 continue;
605 r = -errno;
606 td_verror(td, errno, "io_uring_enter");
607 break;
608 }
609 }
610 } while (events < min);
611
612 return r < 0 ? r : events;
613}
614
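/*
 * Fill in the protection information fields of an NVMe passthrough command
 * just before submission, based on the pi_act, pi_chk, apptag, and
 * apptag_mask options.
 */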
615static inline void fio_ioring_cmd_nvme_pi(struct thread_data *td,
616 struct io_u *io_u)
617{
618 struct ioring_data *ld = td->io_ops_data;
619 struct ioring_options *o = td->eo;
620 struct nvme_uring_cmd *cmd;
621 struct io_uring_sqe *sqe;
622 struct nvme_cmd_ext_io_opts ext_opts = {0};
623 struct nvme_data *data = FILE_ENG_DATA(io_u->file);
624
625 if (io_u->ddir == DDIR_TRIM)
626 return;
627
628 sqe = &ld->sqes[(io_u->index) << 1];
629 cmd = (struct nvme_uring_cmd *)sqe->cmd;
630
631 if (data->pi_type) {
632 if (o->pi_act)
633 ext_opts.io_flags |= NVME_IO_PRINFO_PRACT;
634 ext_opts.io_flags |= o->prchk;
635 ext_opts.apptag = o->apptag;
636 ext_opts.apptag_mask = o->apptag_mask;
637 }
638
639 fio_nvme_pi_fill(cmd, io_u, &ext_opts);
640}
641
642static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
643 struct io_u *io_u)
644{
645 struct ioring_data *ld = td->io_ops_data;
646 struct cmdprio *cmdprio = &ld->cmdprio;
647
648 if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
649 ld->sqes[io_u->index].ioprio = io_u->ioprio;
650}
651
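/*
 * Queue an io_u into the SQ ring. Sync trims are completed inline; everything
 * else gets its index written into the SQ array and the tail bumped with a
 * release store.
 */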
652static enum fio_q_status fio_ioring_queue(struct thread_data *td,
653 struct io_u *io_u)
654{
655 struct ioring_data *ld = td->io_ops_data;
656 struct ioring_options *o = td->eo;
657 struct io_sq_ring *ring = &ld->sq_ring;
658 unsigned tail, next_tail;
659
660 fio_ro_check(td, io_u);
661
662 if (ld->queued == ld->iodepth)
663 return FIO_Q_BUSY;
664
665 if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM) {
666 if (ld->queued)
667 return FIO_Q_BUSY;
668
669 do_io_u_trim(td, io_u);
670
671 io_u_mark_submit(td, 1);
672 io_u_mark_complete(td, 1);
673 return FIO_Q_COMPLETED;
674 }
675
676 tail = *ring->tail;
677 next_tail = tail + 1;
678 if (next_tail == atomic_load_relaxed(ring->head))
679 return FIO_Q_BUSY;
680
681 if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
682 fio_ioring_cmdprio_prep(td, io_u);
683
684 if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
685 o->cmd_type == FIO_URING_CMD_NVME)
686 fio_ioring_cmd_nvme_pi(td, io_u);
687
688 ring->array[tail & ld->sq_ring_mask] = io_u->index;
689 atomic_store_release(ring->tail, next_tail);
690
691 ld->queued++;
692 return FIO_Q_QUEUED;
693}
694
695static void fio_ioring_queued(struct thread_data *td, int start, int nr)
696{
697 struct ioring_data *ld = td->io_ops_data;
698 struct timespec now;
699
700 if (!fio_fill_issue_time(td))
701 return;
702
703 fio_gettime(&now, NULL);
704
705 while (nr--) {
706 struct io_sq_ring *ring = &ld->sq_ring;
707 int index = ring->array[start & ld->sq_ring_mask];
708 struct io_u *io_u = ld->io_u_index[index];
709
710 memcpy(&io_u->issue_time, &now, sizeof(now));
711 io_u_queued(td, io_u);
712
713 start++;
714 }
715
716 /*
717 * only used for iolog
718 */
719 if (td->o.read_iolog_file)
720 memcpy(&td->last_issue, &now, sizeof(now));
721}
722
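/*
 * Submit everything queued in the SQ ring. With sqthread_poll we only need
 * to wake the kernel thread if it has gone idle; otherwise loop on
 * io_uring_enter() until all queued entries are consumed.
 */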
723static int fio_ioring_commit(struct thread_data *td)
724{
725 struct ioring_data *ld = td->io_ops_data;
726 struct ioring_options *o = td->eo;
727 int ret;
728
729 if (!ld->queued)
730 return 0;
731
732 /*
733 * Kernel side does submission. Just need to check if the ring is
734 * flagged as needing a kick; if so, call io_uring_enter(). This
735 * only happens if we've been idle too long.
736 */
737 if (o->sqpoll_thread) {
738 struct io_sq_ring *ring = &ld->sq_ring;
739 unsigned start = *ld->sq_ring.tail - ld->queued;
740 unsigned flags;
741
742 flags = atomic_load_relaxed(ring->flags);
743 if (flags & IORING_SQ_NEED_WAKEUP)
744 io_uring_enter(ld, ld->queued, 0,
745 IORING_ENTER_SQ_WAKEUP);
746 fio_ioring_queued(td, start, ld->queued);
747 io_u_mark_submit(td, ld->queued);
748
749 ld->queued = 0;
750 return 0;
751 }
752
753 do {
754 unsigned start = *ld->sq_ring.head;
755 long nr = ld->queued;
756
757 ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
758 if (ret > 0) {
759 fio_ioring_queued(td, start, ret);
760 io_u_mark_submit(td, ret);
761
762 ld->queued -= ret;
763 ret = 0;
764 } else if (!ret) {
765 io_u_mark_submit(td, ret);
766 continue;
767 } else {
768 if (errno == EAGAIN || errno == EINTR) {
769 ret = fio_ioring_cqring_reap(td, 0, ld->queued);
770 if (ret)
771 continue;
772 /* Shouldn't happen */
773 usleep(1);
774 continue;
775 }
776 ret = -errno;
777 td_verror(td, errno, "io_uring_enter submit");
778 break;
779 }
780 } while (ld->queued);
781
782 return ret;
783}
784
785static void fio_ioring_unmap(struct ioring_data *ld)
786{
787 int i;
788
789 for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
790 munmap(ld->mmap[i].ptr, ld->mmap[i].len);
791 close(ld->ring_fd);
792}
793
794static void fio_ioring_cleanup(struct thread_data *td)
795{
796 struct ioring_data *ld = td->io_ops_data;
797
798 if (ld) {
799 if (!(td->flags & TD_F_CHILD))
800 fio_ioring_unmap(ld);
801
802 fio_cmdprio_cleanup(&ld->cmdprio);
803 free(ld->io_u_index);
804 free(ld->md_buf);
805 free(ld->iovecs);
806 free(ld->fds);
807 free(ld->dsm);
808 free(ld);
809 }
810}
811
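/*
 * mmap() the SQ ring, SQE array, and CQ ring that io_uring_setup() created,
 * and cache pointers to the pieces fio touches on the fast path.
 */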
812static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
813{
814 struct io_sq_ring *sring = &ld->sq_ring;
815 struct io_cq_ring *cring = &ld->cq_ring;
816 void *ptr;
817
818 ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
819 ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
820 MAP_SHARED | MAP_POPULATE, ld->ring_fd,
821 IORING_OFF_SQ_RING);
822 ld->mmap[0].ptr = ptr;
823 sring->head = ptr + p->sq_off.head;
824 sring->tail = ptr + p->sq_off.tail;
825 sring->ring_mask = ptr + p->sq_off.ring_mask;
826 sring->ring_entries = ptr + p->sq_off.ring_entries;
827 sring->flags = ptr + p->sq_off.flags;
828 sring->array = ptr + p->sq_off.array;
829 ld->sq_ring_mask = *sring->ring_mask;
830
831 if (p->flags & IORING_SETUP_SQE128)
832 ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
833 else
834 ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
835 ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
836 MAP_SHARED | MAP_POPULATE, ld->ring_fd,
837 IORING_OFF_SQES);
838 ld->mmap[1].ptr = ld->sqes;
839
840 if (p->flags & IORING_SETUP_CQE32) {
841 ld->mmap[2].len = p->cq_off.cqes +
842 2 * p->cq_entries * sizeof(struct io_uring_cqe);
843 } else {
844 ld->mmap[2].len = p->cq_off.cqes +
845 p->cq_entries * sizeof(struct io_uring_cqe);
846 }
847 ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
848 MAP_SHARED | MAP_POPULATE, ld->ring_fd,
849 IORING_OFF_CQ_RING);
850 ld->mmap[2].ptr = ptr;
851 cring->head = ptr + p->cq_off.head;
852 cring->tail = ptr + p->cq_off.tail;
853 cring->ring_mask = ptr + p->cq_off.ring_mask;
854 cring->ring_entries = ptr + p->cq_off.ring_entries;
855 cring->cqes = ptr + p->cq_off.cqes;
856 ld->cq_ring_mask = *cring->ring_mask;
857 return 0;
858}
859
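/*
 * Probe the kernel for non-vectored read/write support and enable it by
 * default if available, unless the user set nonvectored explicitly.
 */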
860static void fio_ioring_probe(struct thread_data *td)
861{
862 struct ioring_data *ld = td->io_ops_data;
863 struct ioring_options *o = td->eo;
864 struct io_uring_probe *p;
865 int ret;
866
867 /* already set by user, don't touch */
868 if (o->nonvectored != -1)
869 return;
870
871 /* default to off, as that's always safe */
872 o->nonvectored = 0;
873
874 p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
875 if (!p)
876 return;
877
878 ret = syscall(__NR_io_uring_register, ld->ring_fd,
879 IORING_REGISTER_PROBE, p, 256);
880 if (ret < 0)
881 goto out;
882
883 if (IORING_OP_WRITE > p->ops_len)
884 goto out;
885
886 if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
887 (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
888 o->nonvectored = 1;
889out:
890 free(p);
891}
892
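/*
 * Create the ring for the regular io_uring engine. Optional setup flags
 * (DEFER_TASKRUN, COOP_TASKRUN, CQSIZE) are retried without the offending
 * flag on older kernels that return EINVAL.
 */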
893static int fio_ioring_queue_init(struct thread_data *td)
894{
895 struct ioring_data *ld = td->io_ops_data;
896 struct ioring_options *o = td->eo;
897 int depth = td->o.iodepth;
898 struct io_uring_params p;
899 int ret;
900
901 memset(&p, 0, sizeof(p));
902
903 if (o->hipri)
904 p.flags |= IORING_SETUP_IOPOLL;
905 if (o->sqpoll_thread) {
906 p.flags |= IORING_SETUP_SQPOLL;
907 if (o->sqpoll_set) {
908 p.flags |= IORING_SETUP_SQ_AFF;
909 p.sq_thread_cpu = o->sqpoll_cpu;
910 }
911
912 /*
913 * Submission latency for sqpoll_thread is just the time it
914 * takes to fill in the SQ ring entries, and any syscall if
915 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that time
916 * separately.
917 */
918 td->o.disable_slat = 1;
919 }
920
921 /*
922 * Clamp CQ ring size at our SQ ring size; we don't need more entries
923 * than that.
924 */
925 p.flags |= IORING_SETUP_CQSIZE;
926 p.cq_entries = depth;
927
928 /*
929 * Set up COOP_TASKRUN as we don't need to get IPI interrupted for
930 * completing IO operations.
931 */
932 p.flags |= IORING_SETUP_COOP_TASKRUN;
933
934 /*
935 * io_uring is always a single issuer, and we can defer task_work
936 * runs until we reap events.
937 */
938 p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
939
940retry:
941 ret = syscall(__NR_io_uring_setup, depth, &p);
942 if (ret < 0) {
943 if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
944 p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
945 p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
946 goto retry;
947 }
948 if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
949 p.flags &= ~IORING_SETUP_COOP_TASKRUN;
950 goto retry;
951 }
952 if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
953 p.flags &= ~IORING_SETUP_CQSIZE;
954 goto retry;
955 }
956 return ret;
957 }
958
959 ld->ring_fd = ret;
960
961 fio_ioring_probe(td);
962
963 if (o->fixedbufs) {
964 ret = syscall(__NR_io_uring_register, ld->ring_fd,
965 IORING_REGISTER_BUFFERS, ld->iovecs, depth);
966 if (ret < 0)
967 return ret;
968 }
969
970 return fio_ioring_mmap(ld, &p);
971}
972
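/*
 * Ring setup for the io_uring_cmd engine; same EINVAL fallback logic as
 * above, plus SQE128/CQE32 which the NVMe passthrough commands require.
 */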
973static int fio_ioring_cmd_queue_init(struct thread_data *td)
974{
975 struct ioring_data *ld = td->io_ops_data;
976 struct ioring_options *o = td->eo;
977 int depth = td->o.iodepth;
978 struct io_uring_params p;
979 int ret;
980
981 memset(&p, 0, sizeof(p));
982
983 if (o->hipri)
984 p.flags |= IORING_SETUP_IOPOLL;
985 if (o->sqpoll_thread) {
986 p.flags |= IORING_SETUP_SQPOLL;
987 if (o->sqpoll_set) {
988 p.flags |= IORING_SETUP_SQ_AFF;
989 p.sq_thread_cpu = o->sqpoll_cpu;
990 }
991
992 /*
993 * Submission latency for sqpoll_thread is just the time it
994 * takes to fill in the SQ ring entries, and any syscall if
995 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that time
996 * separately.
997 */
998 td->o.disable_slat = 1;
999 }
1000 if (o->cmd_type == FIO_URING_CMD_NVME) {
1001 p.flags |= IORING_SETUP_SQE128;
1002 p.flags |= IORING_SETUP_CQE32;
1003 }
1004
1005 /*
1006 * Clamp CQ ring size at our SQ ring size; we don't need more entries
1007 * than that.
1008 */
1009 p.flags |= IORING_SETUP_CQSIZE;
1010 p.cq_entries = depth;
1011
1012 /*
1013 * Set up COOP_TASKRUN as we don't need to get IPI interrupted for
1014 * completing IO operations.
1015 */
1016 p.flags |= IORING_SETUP_COOP_TASKRUN;
1017
1018 /*
1019 * io_uring is always a single issuer, and we can defer task_work
1020 * runs until we reap events.
1021 */
1022 p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
1023
1024retry:
1025 ret = syscall(__NR_io_uring_setup, depth, &p);
1026 if (ret < 0) {
1027 if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
1028 p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
1029 p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
1030 goto retry;
1031 }
1032 if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
1033 p.flags &= ~IORING_SETUP_COOP_TASKRUN;
1034 goto retry;
1035 }
1036 if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
1037 p.flags &= ~IORING_SETUP_CQSIZE;
1038 goto retry;
1039 }
1040 return ret;
1041 }
1042
1043 ld->ring_fd = ret;
1044
1045 fio_ioring_probe(td);
1046
1047 if (o->fixedbufs) {
1048 ret = syscall(__NR_io_uring_register, ld->ring_fd,
1049 IORING_REGISTER_BUFFERS, ld->iovecs, depth);
1050 if (ret < 0)
1051 return ret;
1052 }
1053
1054 return fio_ioring_mmap(ld, &p);
1055}
1056
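/*
 * Open all files up front and register them with IORING_REGISTER_FILES so
 * SQEs can reference them by index (IOSQE_FIXED_FILE).
 */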
1057static int fio_ioring_register_files(struct thread_data *td)
1058{
1059 struct ioring_data *ld = td->io_ops_data;
1060 struct fio_file *f;
1061 unsigned int i;
1062 int ret;
1063
1064 ld->fds = calloc(td->o.nr_files, sizeof(int));
1065
1066 for_each_file(td, f, i) {
1067 ret = generic_open_file(td, f);
1068 if (ret)
1069 goto err;
1070 ld->fds[i] = f->fd;
1071 f->engine_pos = i;
1072 }
1073
1074 ret = syscall(__NR_io_uring_register, ld->ring_fd,
1075 IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
1076 if (ret) {
1077err:
1078 free(ld->fds);
1079 ld->fds = NULL;
1080 }
1081
1082 /*
1083 * Pretend the file is closed again, and really close it if we hit
1084 * an error.
1085 */
1086 for_each_file(td, f, i) {
1087 if (ret) {
1088 int fio_unused ret2;
1089 ret2 = generic_close_file(td, f);
1090 } else
1091 f->fd = -1;
1092 }
1093
1094 return ret;
1095}
1096
1097static int fio_ioring_post_init(struct thread_data *td)
1098{
1099 struct ioring_data *ld = td->io_ops_data;
1100 struct ioring_options *o = td->eo;
1101 struct io_u *io_u;
1102 int err, i;
1103
1104 for (i = 0; i < td->o.iodepth; i++) {
1105 struct iovec *iov = &ld->iovecs[i];
1106
1107 io_u = ld->io_u_index[i];
1108 iov->iov_base = io_u->buf;
1109 iov->iov_len = td_max_bs(td);
1110 }
1111
1112 err = fio_ioring_queue_init(td);
1113 if (err) {
1114 int init_err = errno;
1115
1116 if (init_err == ENOSYS)
1117 log_err("fio: your kernel doesn't support io_uring\n");
1118 td_verror(td, init_err, "io_queue_init");
1119 return 1;
1120 }
1121
1122 for (i = 0; i < td->o.iodepth; i++) {
1123 struct io_uring_sqe *sqe;
1124
1125 sqe = &ld->sqes[i];
1126 memset(sqe, 0, sizeof(*sqe));
1127 }
1128
1129 if (o->registerfiles) {
1130 err = fio_ioring_register_files(td);
1131 if (err) {
1132 td_verror(td, errno, "ioring_register_files");
1133 return 1;
1134 }
1135 }
1136
1137 return 0;
1138}
1139
1140static int fio_ioring_cmd_post_init(struct thread_data *td)
1141{
1142 struct ioring_data *ld = td->io_ops_data;
1143 struct ioring_options *o = td->eo;
1144 struct io_u *io_u;
1145 int err, i;
1146
1147 for (i = 0; i < td->o.iodepth; i++) {
1148 struct iovec *iov = &ld->iovecs[i];
1149
1150 io_u = ld->io_u_index[i];
1151 iov->iov_base = io_u->buf;
1152 iov->iov_len = td_max_bs(td);
1153 }
1154
1155 err = fio_ioring_cmd_queue_init(td);
1156 if (err) {
1157 int init_err = errno;
1158
1159 td_verror(td, init_err, "io_queue_init");
1160 return 1;
1161 }
1162
1163 for (i = 0; i < td->o.iodepth; i++) {
1164 struct io_uring_sqe *sqe;
1165
1166 if (o->cmd_type == FIO_URING_CMD_NVME) {
1167 sqe = &ld->sqes[i << 1];
1168 memset(sqe, 0, 2 * sizeof(*sqe));
1169 } else {
1170 sqe = &ld->sqes[i];
1171 memset(sqe, 0, sizeof(*sqe));
1172 }
1173 }
1174
1175 if (o->registerfiles) {
1176 err = fio_ioring_register_files(td);
1177 if (err) {
1178 td_verror(td, errno, "ioring_register_files");
1179 return 1;
1180 }
1181 }
1182
1183 return 0;
1184}
1185
1186static void parse_prchk_flags(struct ioring_options *o)
1187{
1188 if (!o->pi_chk)
1189 return;
1190
1191 if (strstr(o->pi_chk, "GUARD") != NULL)
1192 o->prchk = NVME_IO_PRINFO_PRCHK_GUARD;
1193 if (strstr(o->pi_chk, "REFTAG") != NULL)
1194 o->prchk |= NVME_IO_PRINFO_PRCHK_REF;
1195 if (strstr(o->pi_chk, "APPTAG") != NULL)
1196 o->prchk |= NVME_IO_PRINFO_PRCHK_APP;
1197}
1198
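/*
 * Per-thread init: round the ring depth up to a power of two, allocate the
 * io_u index, iovec, and (where needed) metadata and DSM buffers, and
 * resolve write_mode into the NVMe opcode used by the passthrough path.
 */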
1199static int fio_ioring_init(struct thread_data *td)
1200{
1201 struct ioring_options *o = td->eo;
1202 struct ioring_data *ld;
1203 struct nvme_dsm *dsm;
1204 void *ptr;
1205 unsigned int dsm_size;
1206 unsigned long long md_size;
1207 int ret, i;
1208
1209 /* sqthread submission requires registered files */
1210 if (o->sqpoll_thread)
1211 o->registerfiles = 1;
1212
1213 if (o->registerfiles && td->o.nr_files != td->o.open_files) {
1214 log_err("fio: io_uring registered files require nr_files to "
1215 "be identical to open_files\n");
1216 return 1;
1217 }
1218
1219 ld = calloc(1, sizeof(*ld));
1220
1221 /* ring depth must be a power-of-2 */
1222 ld->iodepth = td->o.iodepth;
1223 td->o.iodepth = roundup_pow2(td->o.iodepth);
1224
1225 /* io_u index */
1226 ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
1227
1228 /*
1229 * Metadata buffer for nvme commands.
1230 * We are only supporting iomem=malloc / mem=malloc as of now.
1231 */
1232 if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
1233 (o->cmd_type == FIO_URING_CMD_NVME) && o->md_per_io_size) {
1234 md_size = (unsigned long long) o->md_per_io_size
1235 * (unsigned long long) td->o.iodepth;
1236 md_size += page_mask + td->o.mem_align;
1237 if (td->o.mem_align && td->o.mem_align > page_size)
1238 md_size += td->o.mem_align - page_size;
1239 if (td->o.mem_type == MEM_MALLOC) {
1240 ld->md_buf = malloc(md_size);
1241 if (!ld->md_buf) {
1242 free(ld);
1243 return 1;
1244 }
1245 } else {
1246 log_err("fio: Only iomem=malloc or mem=malloc is supported\n");
1247 free(ld);
1248 return 1;
1249 }
1250 }
1251 parse_prchk_flags(o);
1252
1253 ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));
1254
1255 td->io_ops_data = ld;
1256
1257 ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
1258 if (ret) {
1259 td_verror(td, EINVAL, "fio_ioring_init");
1260 return 1;
1261 }
1262
1263 /*
1264 * For io_uring_cmd, trims are async operations unless we are operating
1265 * in zbd mode where trim means zone reset.
1266 */
1267 if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
1268 td->o.zone_mode == ZONE_MODE_ZBD) {
1269 td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;
1270 } else {
1271 dsm_size = sizeof(*ld->dsm) +
1272 td->o.num_range * sizeof(struct nvme_dsm_range);
1273 ld->dsm = calloc(td->o.iodepth, dsm_size);
1274 ptr = ld->dsm;
1275 for (i = 0; i < td->o.iodepth; i++) {
1276 dsm = (struct nvme_dsm *)ptr;
1277 dsm->nr_ranges = td->o.num_range;
1278 ptr += dsm_size;
1279 }
1280 }
1281
1282 if (!strcmp(td->io_ops->name, "io_uring_cmd")) {
1283 if (td_write(td)) {
1284 switch (o->write_mode) {
1285 case FIO_URING_CMD_WMODE_UNCOR:
1286 ld->write_opcode = nvme_cmd_write_uncor;
1287 break;
1288 case FIO_URING_CMD_WMODE_ZEROES:
1289 ld->write_opcode = nvme_cmd_write_zeroes;
1290 break;
1291 case FIO_URING_CMD_WMODE_VERIFY:
1292 ld->write_opcode = nvme_cmd_verify;
1293 break;
1294 default:
1295 ld->write_opcode = nvme_cmd_write;
1296 break;
1297 }
1298 }
1299
1300 if (o->readfua)
1301 ld->cdw12_flags[DDIR_READ] = 1 << 30;
1302 if (o->writefua)
1303 ld->cdw12_flags[DDIR_WRITE] = 1 << 30;
1304 }
1305
1306 return 0;
1307}
1308
1309static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
1310{
1311 struct ioring_data *ld = td->io_ops_data;
1312 struct ioring_options *o = td->eo;
1313 struct nvme_pi_data *pi_data;
1314 char *p;
1315
1316 ld->io_u_index[io_u->index] = io_u;
1317
1318 if (!strcmp(td->io_ops->name, "io_uring_cmd")) {
1319 p = PTR_ALIGN(ld->md_buf, page_mask) + td->o.mem_align;
1320 p += o->md_per_io_size * io_u->index;
1321 io_u->mmap_data = p;
1322
1323 if (!o->pi_act) {
1324 pi_data = calloc(1, sizeof(*pi_data));
1325 pi_data->io_flags |= o->prchk;
1326 pi_data->apptag_mask = o->apptag_mask;
1327 pi_data->apptag = o->apptag;
1328 io_u->engine_data = pi_data;
1329 }
1330 }
1331
1332 return 0;
1333}
1334
1335static void fio_ioring_io_u_free(struct thread_data *td, struct io_u *io_u)
1336{
1337 struct ioring_options *o = td->eo;
1338 struct nvme_pi *pi;
1339
1340 if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
1341 (o->cmd_type == FIO_URING_CMD_NVME)) {
1342 pi = io_u->engine_data;
1343 free(pi);
1344 io_u->engine_data = NULL;
1345 }
1346}
1347
1348static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
1349{
1350 struct ioring_data *ld = td->io_ops_data;
1351 struct ioring_options *o = td->eo;
1352
1353 if (!ld || !o->registerfiles)
1354 return generic_open_file(td, f);
1355
1356 f->fd = ld->fds[f->engine_pos];
1357 return 0;
1358}
1359
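/*
 * Open for the passthrough engine: fetch namespace info once per file and
 * validate block size, metadata buffer size, and verify/write_mode settings
 * against the LBA format.
 */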
1360static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
1361{
1362 struct ioring_data *ld = td->io_ops_data;
1363 struct ioring_options *o = td->eo;
1364
1365 if (o->cmd_type == FIO_URING_CMD_NVME) {
1366 struct nvme_data *data = NULL;
1367 unsigned int lba_size = 0;
1368 __u64 nlba = 0;
1369 int ret;
1370
1371 /* Store the namespace-id and lba size. */
1372 data = FILE_ENG_DATA(f);
1373 if (data == NULL) {
1374 data = calloc(1, sizeof(struct nvme_data));
1375 ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);
1376 if (ret) {
1377 free(data);
1378 return ret;
1379 }
1380
1381 FILE_SET_ENG_DATA(f, data);
1382 }
1383
1384 lba_size = data->lba_ext ? data->lba_ext : data->lba_size;
1385
1386 for_each_rw_ddir(ddir) {
1387 if (td->o.min_bs[ddir] % lba_size || td->o.max_bs[ddir] % lba_size) {
1388 if (data->lba_ext) {
1389 log_err("%s: block size must be a multiple of %u "
1390 "(LBA data size + Metadata size)\n", f->file_name, lba_size);
1391 if (td->o.min_bs[ddir] == td->o.max_bs[ddir] &&
1392 !(td->o.min_bs[ddir] % data->lba_size)) {
1393 /* fixed block size is actually a multiple of LBA data size */
1394 unsigned long long suggestion = lba_size *
1395 (td->o.min_bs[ddir] / data->lba_size);
1396 log_err("Did you mean to use a block size of %llu?\n", suggestion);
1397 }
1398 } else {
1399 log_err("%s: block size must be a multiple of LBA data size\n",
1400 f->file_name);
1401 }
1402 td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
1403 return 1;
1404 }
1405 if (data->ms && !data->lba_ext && ddir != DDIR_TRIM &&
1406 (o->md_per_io_size < ((td->o.max_bs[ddir] / data->lba_size) *
1407 data->ms))) {
1408 log_err("%s: md_per_io_size should be at least %llu bytes\n",
1409 f->file_name,
1410 ((td->o.max_bs[ddir] / data->lba_size) * data->ms));
1411 td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
1412 return 1;
1413 }
1414 }
1415
1416 /*
1417 * For extended logical block sizes we cannot use verify when
1418 * end-to-end data protection checks are enabled, as the PI
1419 * section of the data buffer conflicts with verify.
1420 */
1421 if (data->ms && data->pi_type && data->lba_ext &&
1422 td->o.verify != VERIFY_NONE) {
1423 log_err("%s: for extended LBA, verify cannot be used when E2E data protection is enabled\n",
1424 f->file_name);
1425 td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
1426 return 1;
1427 }
1428
1429 if (o->write_mode != FIO_URING_CMD_WMODE_WRITE &&
1430 !td_write(td)) {
1431 log_err("%s: 'readwrite=|rw=' has no write\n",
1432 f->file_name);
1433 td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
1434 return 1;
1435 }
1436 }
1437 if (!ld || !o->registerfiles)
1438 return generic_open_file(td, f);
1439
1440 f->fd = ld->fds[f->engine_pos];
1441 return 0;
1442}
1443
1444static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
1445{
1446 struct ioring_data *ld = td->io_ops_data;
1447 struct ioring_options *o = td->eo;
1448
1449 if (!ld || !o->registerfiles)
1450 return generic_close_file(td, f);
1451
1452 f->fd = -1;
1453 return 0;
1454}
1455
1456static int fio_ioring_cmd_close_file(struct thread_data *td,
1457 struct fio_file *f)
1458{
1459 struct ioring_data *ld = td->io_ops_data;
1460 struct ioring_options *o = td->eo;
1461
1462 if (o->cmd_type == FIO_URING_CMD_NVME) {
1463 struct nvme_data *data = FILE_ENG_DATA(f);
1464
1465 FILE_SET_ENG_DATA(f, NULL);
1466 free(data);
1467 }
1468 if (!ld || !o->registerfiles)
1469 return generic_close_file(td, f);
1470
1471 f->fd = -1;
1472 return 0;
1473}
1474
1475static int fio_ioring_cmd_get_file_size(struct thread_data *td,
1476 struct fio_file *f)
1477{
1478 struct ioring_options *o = td->eo;
1479
1480 if (fio_file_size_known(f))
1481 return 0;
1482
1483 if (o->cmd_type == FIO_URING_CMD_NVME) {
1484 struct nvme_data *data = NULL;
1485 __u64 nlba = 0;
1486 int ret;
1487
1488 data = calloc(1, sizeof(struct nvme_data));
1489 ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);
1490 if (ret) {
1491 free(data);
1492 return ret;
1493 }
1494
1495 f->real_file_size = data->lba_size * nlba;
1496 fio_file_set_size_known(f);
1497
1498 FILE_SET_ENG_DATA(f, data);
1499 return 0;
1500 }
1501 return generic_get_file_size(td, f);
1502}
1503
1504static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
1505 struct fio_file *f,
1506 enum zbd_zoned_model *model)
1507{
1508 return fio_nvme_get_zoned_model(td, f, model);
1509}
1510
1511static int fio_ioring_cmd_report_zones(struct thread_data *td,
1512 struct fio_file *f, uint64_t offset,
1513 struct zbd_zone *zbdz,
1514 unsigned int nr_zones)
1515{
1516 return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
1517}
1518
1519static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
1520 uint64_t offset, uint64_t length)
1521{
1522 return fio_nvme_reset_wp(td, f, offset, length);
1523}
1524
1525static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
1526 struct fio_file *f,
1527 unsigned int *max_open_zones)
1528{
1529 return fio_nvme_get_max_open_zones(td, f, max_open_zones);
1530}
1531
1532static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
1533 struct fio_ruhs_info *fruhs_info)
1534{
1535 struct nvme_fdp_ruh_status *ruhs;
1536 int bytes, ret, i;
1537
1538 bytes = sizeof(*ruhs) + FDP_MAX_RUHS * sizeof(struct nvme_fdp_ruh_status_desc);
1539 ruhs = scalloc(1, bytes);
1540 if (!ruhs)
1541 return -ENOMEM;
1542
1543 ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);
1544 if (ret)
1545 goto free;
1546
1547 fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
1548 for (i = 0; i < fruhs_info->nr_ruhs; i++)
1549 fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
1550free:
1551 sfree(ruhs);
1552 return ret;
1553}
1554
1555static struct ioengine_ops ioengine_uring = {
1556 .name = "io_uring",
1557 .version = FIO_IOOPS_VERSION,
1558 .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
1559 FIO_ASYNCIO_SETS_ISSUE_TIME,
1560 .init = fio_ioring_init,
1561 .post_init = fio_ioring_post_init,
1562 .io_u_init = fio_ioring_io_u_init,
1563 .prep = fio_ioring_prep,
1564 .queue = fio_ioring_queue,
1565 .commit = fio_ioring_commit,
1566 .getevents = fio_ioring_getevents,
1567 .event = fio_ioring_event,
1568 .cleanup = fio_ioring_cleanup,
1569 .open_file = fio_ioring_open_file,
1570 .close_file = fio_ioring_close_file,
1571 .get_file_size = generic_get_file_size,
1572 .options = options,
1573 .option_struct_size = sizeof(struct ioring_options),
1574};
1575
1576static struct ioengine_ops ioengine_uring_cmd = {
1577 .name = "io_uring_cmd",
1578 .version = FIO_IOOPS_VERSION,
1579 .flags = FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
1580 FIO_ASYNCIO_SETS_ISSUE_TIME |
1581 FIO_MULTI_RANGE_TRIM,
1582 .init = fio_ioring_init,
1583 .post_init = fio_ioring_cmd_post_init,
1584 .io_u_init = fio_ioring_io_u_init,
1585 .io_u_free = fio_ioring_io_u_free,
1586 .prep = fio_ioring_cmd_prep,
1587 .queue = fio_ioring_queue,
1588 .commit = fio_ioring_commit,
1589 .getevents = fio_ioring_getevents,
1590 .event = fio_ioring_cmd_event,
1591 .cleanup = fio_ioring_cleanup,
1592 .open_file = fio_ioring_cmd_open_file,
1593 .close_file = fio_ioring_cmd_close_file,
1594 .get_file_size = fio_ioring_cmd_get_file_size,
1595 .get_zoned_model = fio_ioring_cmd_get_zoned_model,
1596 .report_zones = fio_ioring_cmd_report_zones,
1597 .reset_wp = fio_ioring_cmd_reset_wp,
1598 .get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
1599 .options = options,
1600 .option_struct_size = sizeof(struct ioring_options),
1601 .fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,
1602};
1603
1604static void fio_init fio_ioring_register(void)
1605{
1606 register_ioengine(&ioengine_uring);
1607 register_ioengine(&ioengine_uring_cmd);
1608}
1609
1610static void fio_exit fio_ioring_unregister(void)
1611{
1612 unregister_ioengine(&ioengine_uring);
1613 unregister_ioengine(&ioengine_uring_cmd);
1614}
1615#endif