io_uring: add option for non-vectored read/write commands

engines/io_uring.c:
/*
 * io_uring engine
 *
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
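/*
 * Example usage (illustrative only; the option names match the fio_options
 * table below, the device path is a placeholder):
 *
 *   fio --name=test --ioengine=io_uring --nonvectored=1 --rw=randread \
 *       --bs=4k --iodepth=32 --filename=/dev/nvme0n1
 */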
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"

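/*
 * Userspace views of the SQ and CQ rings. The pointers below are filled in
 * by fio_ioring_mmap() and point directly into the regions mmap'ed from the
 * ring fd, so updating *tail / *head communicates with the kernel.
 */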
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

struct ioring_mmap {
	void *ptr;
	size_t len;
};

struct ioring_data {
	int ring_fd;

	struct io_u **io_u_index;

	int *fds;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec *iovecs;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	int queued;
	int cq_ring_off;
	unsigned iodepth;

	struct ioring_mmap mmap[3];
};

struct ioring_options {
	void *pad;
	unsigned int hipri;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
};

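/*
 * Opcode lookup table, indexed as ddir_to_op[ddir][!!nonvectored]: vectored
 * READV/WRITEV by default, single-buffer READ/WRITE when nonvectored is set.
 */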
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};

static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
	o->sqpoll_set = 1;
	return 0;
}

static struct fio_option options[] = {
	{
		.name = "hipri",
		.lname = "High Priority",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, hipri),
		.help = "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "fixedbufs",
		.lname = "Fixed (pre-mapped) IO buffers",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, fixedbufs),
		.help = "Pre-map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "registerfiles",
		.lname = "Register file set",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, registerfiles),
		.help = "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "sqthread_poll",
		.lname = "Kernel SQ thread polling",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, sqpoll_thread),
		.help = "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "sqthread_poll_cpu",
		.lname = "SQ Thread Poll CPU",
		.type = FIO_OPT_INT,
		.cb = fio_ioring_sqpoll_cb,
		.help = "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "nonvectored",
		.lname = "Non-vectored",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, nonvectored),
		.help = "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = "uncached",
		.lname = "Uncached",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct ioring_options, uncached),
		.help = "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
	},
	{
		.name = NULL,
	},
};

static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	return syscall(__NR_sys_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
}

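/*
 * Fill in the SQE for this io_u: file descriptor (registered index or raw
 * fd), opcode based on data direction and the nonvectored option, buffer
 * addressing (fixed buffer, plain buffer, or single iovec), offset, and any
 * fsync/sync_file_range specific fields.
 */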
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	/* zero out fields not used in this submission */
	memset(sqe, 0, sizeof(*sqe));

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
		if (o->fixedbufs) {
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			if (o->nonvectored) {
				sqe->addr = (unsigned long)
						ld->iovecs[io_u->index].iov_base;
				sqe->len = ld->iovecs[io_u->index].iov_len;
			} else {
				sqe->addr = (unsigned long) &ld->iovecs[io_u->index];
				sqe->len = 1;
			}
		}
		if (!td->o.odirect && o->uncached)
			sqe->rw_flags = RWF_UNCACHED;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	sqe->user_data = (unsigned long) io_u;
	return 0;
}

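/*
 * Turn a reaped CQE back into its io_u. An error return is stored in
 * io_u->error, a short transfer is recorded as residual bytes.
 */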
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	} else
		io_u->error = 0;

	return io_u;
}

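/*
 * Reap completions by walking the CQ ring from head to tail, stopping once
 * 'max' events (including those already reaped by the caller) are seen.
 */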
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		read_barrier();
		if (head == *ring->tail)
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	*ring->head = head;
	write_barrier();
	return reaped;
}

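/*
 * Wait for at least 'min' completions. Without a kernel SQ poll thread we
 * enter the kernel with IORING_ENTER_GETEVENTS to wait; with SQPOLL the ring
 * is serviced for us and we only poll the CQ ring.
 */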
static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned events = 0;
	int r;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);
		if (r) {
			events += r;
			if (actual_min != 0)
				actual_min -= r;
			continue;
		}

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
			if (r < 0) {
				if (errno == EAGAIN || errno == EINTR)
					continue;
				td_verror(td, errno, "io_uring_enter");
				break;
			}
		}
	} while (events < min);

	return r < 0 ? r : events;
}

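/*
 * Queue an io_u: store its sqe index in the SQ ring array and advance the
 * tail. Trims are not asynchronous here and are completed inline.
 */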
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	read_barrier();
	if (next_tail == *ring->head)
		return FIO_Q_BUSY;

	/* ensure sqe stores are ordered with tail update */
	write_barrier();
	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	*ring->tail = next_tail;
	write_barrier();

	ld->queued++;
	return FIO_Q_QUEUED;
}

static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}
}

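/*
 * Submit everything that has been queued. With SQPOLL the kernel thread does
 * the submission and we only ring the doorbell (IORING_ENTER_SQ_WAKEUP) if it
 * went to sleep; otherwise io_uring_enter() is called until all queued SQEs
 * have been accepted.
 */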
static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * The kernel side does the submission. We just need to check if the
	 * ring is flagged as needing a kick and, if so, call io_uring_enter().
	 * This only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;

		read_barrier();
		if (*ring->flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}

static void fio_ioring_unmap(struct ioring_data *ld)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
	close(ld->ring_fd);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (ld) {
		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		free(ld->io_u_index);
		free(ld->iovecs);
		free(ld->fds);
		free(ld);
	}
}

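/*
 * Map the three shared regions described by io_uring_setup(): the SQ ring
 * (including the index array), the SQE array itself, and the CQ ring. The
 * offsets in struct io_uring_params tell us where each field lives within
 * the mapping.
 */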
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;
	void *ptr;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, ld->ring_fd,
				IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	ld->mmap[2].len = p->cq_off.cqes +
				p->cq_entries * sizeof(struct io_uring_cqe);
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
	return 0;
}

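/*
 * Create the ring with io_uring_setup(), applying the IOPOLL/SQPOLL flags
 * from the job options. With fixedbufs, the memlock limit is raised and the
 * iovec table is registered via IORING_REGISTER_BUFFERS before the rings are
 * mapped.
 */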
static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}
	}

	ret = syscall(__NR_sys_io_uring_setup, depth, &p);
	if (ret < 0)
		return ret;

	ld->ring_fd = ret;

	if (o->fixedbufs) {
		struct rlimit rlim = {
			.rlim_cur = RLIM_INFINITY,
			.rlim_max = RLIM_INFINITY,
		};

		if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0)
			return -1;

		ret = syscall(__NR_sys_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}

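/*
 * Open all files up front and register their descriptors with the ring so
 * SQEs can reference them by index via IOSQE_FIXED_FILE. On success the
 * files are marked closed again; the engine hands out the registered fd in
 * fio_ioring_open_file().
 */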
static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct fio_file *f;
	unsigned int i;
	int ret;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
		if (ret)
			goto err;
		ld->fds[i] = f->fd;
		f->engine_pos = i;
	}

	ret = syscall(__NR_sys_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
	if (ret) {
err:
		free(ld->fds);
		ld->fds = NULL;
	}

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		if (ret) {
			int fio_unused ret2;
			ret2 = generic_close_file(td, f);
		} else
			f->fd = -1;
	}

	return ret;
}

static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		td_verror(td, errno, "io_queue_init");
		return 1;
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}

static unsigned roundup_pow2(unsigned depth)
{
	return 1UL << __fls(depth - 1);
}

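/*
 * Per-thread setup. SQPOLL submission needs registered files, and the ring
 * depth must be a power of two, so td->o.iodepth is adjusted via
 * roundup_pow2() while the original depth is kept in ld->iodepth for
 * queue-depth accounting.
 */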
static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	/* io_u index */
	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;
	return 0;
}

static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->io_u_index[io_u->index] = io_u;
	return 0;
}

static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}

static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name = "io_uring",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_ASYNCIO_SYNC_TRIM,
	.init = fio_ioring_init,
	.post_init = fio_ioring_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.prep = fio_ioring_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_open_file,
	.close_file = fio_ioring_close_file,
	.get_file_size = generic_get_file_size,
	.options = options,
	.option_struct_size = sizeof(struct ioring_options),
};

static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine);
}
#endif