Kill off old GUASI IO engine
[fio.git] / engines / io_uring.c
/*
 * io_uring engine
 *
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"

struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};
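
/*
 * Note that the pointer members of io_sq_ring/io_cq_ring are not copies
 * of ring state: they point straight into the memory the kernel maps into
 * this process in fio_ioring_mmap(). The kernel updates the CQ tail and
 * the SQ head, we update the SQ tail and the CQ head, so reads of the
 * other side's index go through an acquire load and publications of our
 * own index through a release store (see fio_ioring_queue() and
 * fio_ioring_cqring_reap()).
 */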

struct ioring_mmap {
	void *ptr;
	size_t len;
};

struct ioring_data {
	int ring_fd;

	struct io_u **io_u_index;

	int *fds;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec *iovecs;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	int queued;
	int cq_ring_off;
	unsigned iodepth;
	bool ioprio_class_set;
	bool ioprio_set;

	struct ioring_mmap mmap[3];
};

struct ioring_options {
	void *pad;
	unsigned int hipri;
	unsigned int cmdprio_percentage;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int nowait;
};

static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED
};

static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
	o->sqpoll_set = 1;
	return 0;
}

static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "High Priority",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, hipri),
		.help	= "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#ifdef FIO_HAVE_IOPRIO_CLASS
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, cmdprio_percentage),
		.minval	= 1,
		.maxval	= 100,
		.help	= "Send high priority I/O this percentage of the time",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#else
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
#endif
	{
		.name	= "fixedbufs",
		.lname	= "Fixed (pre-mapped) IO buffers",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, fixedbufs),
		.help	= "Pre map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "registerfiles",
		.lname	= "Register file set",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, registerfiles),
		.help	= "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll",
		.lname	= "Kernel SQ thread polling",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, sqpoll_thread),
		.help	= "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll_cpu",
		.lname	= "SQ Thread Poll CPU",
		.type	= FIO_OPT_INT,
		.cb	= fio_ioring_sqpoll_cb,
		.help	= "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nonvectored",
		.lname	= "Non-vectored",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, nonvectored),
		.def	= "-1",
		.help	= "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "uncached",
		.lname	= "Uncached",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, uncached),
		.help	= "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nowait",
		.lname	= "RWF_NOWAIT",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct ioring_options, nowait),
		.help	= "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= NULL,
	},
};
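
/*
 * For reference, a job file exercising the options above might look like
 * this (illustrative values; any valid fio target works for filename):
 *
 *	[uring-test]
 *	ioengine=io_uring
 *	filename=/dev/nvme0n1
 *	direct=1
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 *	fixedbufs=1
 *	registerfiles=1
 *	sqthread_poll=1
 *	sqthread_poll_cpu=0
 */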

static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
}
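
/*
 * fio invokes the io_uring syscalls directly rather than going through
 * liburing; the trailing NULL/0 pair above is the (unused) sigset_t
 * argument and its size.
 */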

static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on actual io_u, requeue could have
			 * adjusted these
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;
				sqe->len = 1;
			}
		}
		/*
		 * Clear rw_flags before OR-ing in the per-IO flags; zeroing
		 * it after them would silently discard RWF_UNCACHED and
		 * RWF_NOWAIT.
		 */
		sqe->rw_flags = 0;
		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;
		if (ld->ioprio_class_set)
			sqe->ioprio = td->o.ioprio_class << 13;
		if (ld->ioprio_set)
			sqe->ioprio |= td->o.ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		sqe->ioprio = 0;
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			sqe->off = 0;
			sqe->addr = 0;
			sqe->len = 0;
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	sqe->user_data = (unsigned long) io_u;
	return 0;
}

static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	} else
		io_u->error = 0;

	return io_u;
}

static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		if (head == atomic_load_acquire(ring->tail))
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	if (reaped)
		atomic_store_release(ring->head, head);

	return reaped;
}
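
/*
 * The acquire load of the CQ tail pairs with the kernel's release store
 * when it posts a completion, guaranteeing the CQE contents are visible
 * before we look at them; the release store of the CQ head tells the
 * kernel those CQE slots may be reused.
 */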

static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned events = 0;
	int r;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);
		if (r) {
			events += r;
			if (actual_min != 0)
				actual_min -= r;
			continue;
		}

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
			if (r < 0) {
				if (errno == EAGAIN || errno == EINTR)
					continue;
				td_verror(td, errno, "io_uring_enter");
				break;
			}
		}
	} while (events < min);

	return r < 0 ? r : events;
}

static void fio_ioring_prio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld = td->io_ops_data;

	if (rand_between(&td->prio_state, 0, 99) < o->cmdprio_percentage) {
		ld->sqes[io_u->index].ioprio = IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT;
		io_u->flags |= IO_U_F_PRIORITY;
	}
}

static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	struct ioring_options *o = td->eo;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	if (next_tail == atomic_load_acquire(ring->head))
		return FIO_Q_BUSY;

	if (o->cmdprio_percentage)
		fio_ioring_prio_prep(td, io_u);
	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);

	ld->queued++;
	return FIO_Q_QUEUED;
}
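
/*
 * Trims never go through the ring: the engine submits no trim opcode, so
 * they are executed synchronously via do_io_u_trim() above, and only when
 * nothing else is in flight. This matches the FIO_ASYNCIO_SYNC_TRIM
 * engine flag set at the bottom of this file.
 */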

static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}
}

static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * The kernel side does the submission; we just need to check if
	 * the ring is flagged as needing a kick and, if so, call
	 * io_uring_enter(). This only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned flags;

		flags = atomic_load_acquire(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}

static void fio_ioring_unmap(struct ioring_data *ld)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
	close(ld->ring_fd);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (ld) {
		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		free(ld->io_u_index);
		free(ld->iovecs);
		free(ld->fds);
		free(ld);
	}
}

static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;
	void *ptr;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, ld->ring_fd,
				IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	ld->mmap[2].len = p->cq_off.cqes +
				p->cq_entries * sizeof(struct io_uring_cqe);
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
	return 0;
}
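
/*
 * Three separate mappings make up the ring: the SQ ring (indices plus the
 * array of SQE indexes) at IORING_OFF_SQ_RING, the SQE array itself at
 * IORING_OFF_SQES, and the CQ ring (indices plus the CQEs) at
 * IORING_OFF_CQ_RING. The kernel reports the field offsets within each
 * region through io_uring_params, which is why everything above is
 * computed as ptr + p->*_off.*.
 */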

static void fio_ioring_probe(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;
	int ret;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_WRITE > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
		o->nonvectored = 1;
out:
	free(p);
}
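
/*
 * The probe exists because the non-vectored IORING_OP_READ/WRITE opcodes
 * reached the kernel later than the vectored variants, as did
 * IORING_REGISTER_PROBE itself; if either the register op or the opcodes
 * are missing, we quietly fall back to READV/WRITEV.
 */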

static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}
	}

	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0)
		return ret;

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}

static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct fio_file *f;
	unsigned int i;
	int ret;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
		if (ret)
			goto err;
		ld->fds[i] = f->fd;
		f->engine_pos = i;
	}

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
	if (ret) {
err:
		free(ld->fds);
		ld->fds = NULL;
	}

	/*
	 * Pretend the files are closed again; if we hit an error above,
	 * really close them.
	 */
	for_each_file(td, f, i) {
		if (ret) {
			int fio_unused ret2;
			ret2 = generic_close_file(td, f);
		} else
			f->fd = -1;
	}

	return ret;
}

static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		td_verror(td, errno, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}

static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	struct thread_options *to = &td->o;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2, e.g. iodepth=100 becomes 128 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	/* io_u index */
	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	/*
	 * Check for option conflicts
	 */
	if ((fio_option_is_set(to, ioprio) || fio_option_is_set(to, ioprio_class)) &&
	    o->cmdprio_percentage != 0) {
		log_err("%s: cmdprio_percentage is mutually exclusive with "
			"the prio or prioclass option, exiting\n", to->name);
		td_verror(td, EINVAL, "fio_io_uring_init");
		return 1;
	}

	if (fio_option_is_set(&td->o, ioprio_class))
		ld->ioprio_class_set = true;
	if (fio_option_is_set(&td->o, ioprio))
		ld->ioprio_set = true;

	return 0;
}

static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->io_u_index[io_u->index] = io_u;
	return 0;
}

static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}

static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name			= "io_uring",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.prep			= fio_ioring_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_open_file,
	.close_file		= fio_ioring_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
};

static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine);
}
#endif