t/io_uring: allow multiple IO threads
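/*
 * Standalone io_uring benchmark: drives the raw io_uring syscalls
 * (io_uring_setup/enter/register) directly, without liburing, using one
 * ring and one submitter thread per -n instance. Files named on the
 * command line are distributed round-robin across the submitter threads.
 *
 * Example invocation (the device path is illustrative only):
 *
 *	./io_uring -d 128 -s 32 -c 32 -p 1 -n 2 /dev/nvme0n1
 */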
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <inttypes.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>

#include "../arch/arch.h"
#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#define min(a, b)	(((a) < (b)) ? (a) : (b))

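/*
 * Userspace views of the shared SQ and CQ rings: each member points into
 * the region mmap()'ed from the ring fd, so loads and stores through these
 * pointers communicate directly with the kernel side of the ring.
 */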
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

#define DEPTH			128
#define BATCH_SUBMIT		32
#define BATCH_COMPLETE		32
#define BS			4096

#define MAX_FDS			16

static unsigned sq_ring_mask, cq_ring_mask;

struct file {
	unsigned long max_blocks;
	unsigned pending_ios;
	int real_fd;
	int fixed_fd;
};

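/*
 * Per-thread state: each submitter owns its own ring, registered buffers,
 * and a private slice of the files. The trailing flexible iovecs[] array
 * is sized to 'depth' entries at allocation time (see get_submitter()).
 */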
struct submitter {
	pthread_t thread;
	int ring_fd;
	int index;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct io_cq_ring cq_ring;
	int inflight;
	unsigned long reaps;
	unsigned long done;
	unsigned long calls;
	volatile int finish;

	__s32 *fds;

	struct file files[MAX_FDS];
	unsigned nr_files;
	unsigned cur_file;
	struct iovec iovecs[];
};

static struct submitter *submitter;
static volatile int finish;

static int depth = DEPTH;
static int batch_submit = BATCH_SUBMIT;
static int batch_complete = BATCH_COMPLETE;
static int bs = BS;
static int polled = 1;		/* use IO polling */
static int fixedbufs = 1;	/* use fixed user buffers */
static int register_files = 1;	/* use fixed files */
static int buffered = 0;	/* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;	/* use kernel submission/poller thread */
static int sq_thread_cpu = -1;	/* pin above thread to this CPU */
static int do_nop = 0;		/* no-op SQ ring commands */
static int nthreads = 1;

static int vectored = 1;

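/*
 * Register the preallocated iovecs with the kernel so IORING_OP_READ_FIXED
 * can skip per-IO page pinning. This requires RLIMIT_MEMLOCK to cover the
 * pinned memory; main() raises it to infinity when fixed buffers are used.
 */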
static int io_uring_register_buffers(struct submitter *s)
{
	if (do_nop)
		return 0;

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_BUFFERS, s->iovecs, depth);
}

static int io_uring_register_files(struct submitter *s)
{
	int i;

	if (do_nop)
		return 0;

	s->fds = calloc(s->nr_files, sizeof(__s32));
	for (i = 0; i < s->nr_files; i++) {
		s->fds[i] = s->files[i].real_fd;
		s->files[i].fixed_fd = i;
	}

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_FILES, s->fds, s->nr_files);
}

static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
	return syscall(__NR_io_uring_setup, entries, p);
}

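/*
 * Probe which opcodes this kernel supports. If the non-vectored
 * IORING_OP_READ is available, prefer it over IORING_OP_READV since it
 * avoids the iovec indirection. On older kernels without the probe op the
 * register call simply fails and the vectored default is kept.
 */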
static void io_uring_probe(int fd)
{
	struct io_uring_probe *p;
	int ret;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_READ > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
		vectored = 0;
out:
	free(p);
}

static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	return syscall(__NR_io_uring_enter, s->ring_fd, to_submit, min_complete,
			flags, NULL, 0);
}

#ifndef CONFIG_HAVE_GETTID
static int gettid(void)
{
	return syscall(__NR_gettid);
}
#endif

static unsigned file_depth(struct submitter *s)
{
	return (depth + s->nr_files - 1) / s->nr_files;
}

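/*
 * Prepare one SQE: pick the next file (moving on once a file has its fair
 * share of the queue depth in flight), choose a random block-aligned
 * offset, and fill in the read variant matching the -B/probe configuration.
 * The file pointer is stashed in user_data so the completion side can
 * account against the right file.
 */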
static void init_io(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index];
	unsigned long offset;
	struct file *f;
	long r;

	if (do_nop) {
		sqe->opcode = IORING_OP_NOP;
		return;
	}

	if (s->nr_files == 1) {
		f = &s->files[0];
	} else {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			s->cur_file++;
			if (s->cur_file == s->nr_files)
				s->cur_file = 0;
			f = &s->files[s->cur_file];
		}
	}
	f->pending_ios++;

	r = lrand48();
	offset = (r % (f->max_blocks - 1)) * bs;

	if (register_files) {
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->fixed_fd;
	} else {
		sqe->flags = 0;
		sqe->fd = f->real_fd;
	}
	if (fixedbufs) {
		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = index;
	} else if (!vectored) {
		sqe->opcode = IORING_OP_READ;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = 0;
	} else {
		sqe->opcode = IORING_OP_READV;
		sqe->addr = (unsigned long) &s->iovecs[index];
		sqe->len = 1;
		sqe->buf_index = 0;
	}
	sqe->ioprio = 0;
	sqe->off = offset;
	sqe->user_data = (unsigned long) f;
}

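/*
 * Queue up to max_ios new SQEs. The tail is published with a release store
 * only after the SQE and array entries are written, pairing with the
 * kernel's acquire read; the acquire load of head keeps us from overwriting
 * entries the kernel has not consumed yet.
 */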
static int prep_more_ios(struct submitter *s, int max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		if (next_tail == atomic_load_acquire(ring->head))
			break;

		index = tail & sq_ring_mask;
		init_io(s, index);
		ring->array[index] = index;
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (prepped)
		atomic_store_release(ring->tail, tail);
	return prepped;
}

static int get_file_size(struct file *f)
{
	struct stat st;

	if (fstat(f->real_fd, &st) < 0)
		return -1;
	if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
			return -1;

		f->max_blocks = bytes / bs;
		return 0;
	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / bs;
		return 0;
	}

	return -1;
}

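/*
 * Drain completed CQEs: the acquire load of the CQ tail makes the CQE
 * contents visible before we read them, and the release store of head
 * hands the consumed entries back to the kernel. Any short or failed read
 * aborts the run.
 */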
static int reap_events(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		struct file *f;

		read_barrier();
		if (head == atomic_load_acquire(ring->tail))
			break;
		cqe = &ring->cqes[head & cq_ring_mask];
		if (!do_nop) {
			f = (struct file *) (uintptr_t) cqe->user_data;
			f->pending_ios--;
			if (cqe->res != bs) {
				printf("io: unexpected ret=%d\n", cqe->res);
				if (polled && cqe->res == -EOPNOTSUPP)
					printf("Your filesystem/driver/kernel doesn't support polled IO\n");
				return -1;
			}
		}
		reaped++;
		head++;
	} while (1);

	if (reaped) {
		s->inflight -= reaped;
		atomic_store_release(ring->head, head);
	}
	return reaped;
}

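/*
 * Main per-thread loop: prep a batch of SQEs, submit via io_uring_enter()
 * (or, under SQPOLL, just publish the tail and only enter the kernel when
 * IORING_SQ_NEED_WAKEUP indicates the poller thread has gone to sleep),
 * then reap completions until the batch target is met.
 */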
static void *submitter_fn(void *data)
{
	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;
	int ret, prepped;

	printf("submitter=%d\n", gettid());

	srand48(pthread_self());

	prepped = 0;
	do {
		int to_wait, to_submit, this_reap, to_prep;
		unsigned ring_flags = 0;

		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios(s, to_prep);
		}
		s->inflight += prepped;
submit_more:
		to_submit = prepped;
submit:
		if (to_submit && (s->inflight + to_submit <= depth))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, batch_complete);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		if (sq_thread_poll)
			ring_flags = atomic_load_acquire(ring->flags);
		if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
			unsigned flags = 0;

			if (to_wait)
				flags = IORING_ENTER_GETEVENTS;
			if (ring_flags & IORING_SQ_NEED_WAKEUP)
				flags |= IORING_ENTER_SQ_WAKEUP;
			ret = io_uring_enter(s, to_submit, to_wait, flags);
			s->calls++;
		} else {
			/* for SQPOLL, we submitted it all effectively */
			ret = to_submit;
		}

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */
		this_reap = 0;
		do {
			int r;

			r = reap_events(s);
			if (r == -1) {
				s->finish = 1;
				break;
			} else if (r > 0)
				this_reap += r;
		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

		if (ret >= 0) {
			if (!ret) {
				to_submit = 0;
				if (s->inflight)
					goto submit;
				continue;
			} else if (ret < to_submit) {
				int diff = to_submit - ret;

				s->done += ret;
				prepped -= diff;
				goto submit_more;
			}
			s->done += ret;
			prepped = 0;
			continue;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
				if (s->finish)
					break;
				if (this_reap)
					goto submit;
				to_submit = 0;
				goto submit;
			}
			printf("io_submit: %s\n", strerror(errno));
			break;
		}
	} while (!s->finish);

	finish = 1;
	return NULL;
}

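/*
 * The submitters live in one contiguous allocation, so indexing has to
 * account for the flexible iovecs[] tail: each element really occupies
 * sizeof(struct submitter) + depth * sizeof(struct iovec) bytes.
 */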
static struct submitter *get_submitter(int offset)
{
	void *ret;

	ret = submitter;
	if (offset)
		ret += offset * (sizeof(*submitter) + depth * sizeof(struct iovec));
	return ret;
}

static void sig_int(int sig)
{
	int j;

	printf("Exiting on signal %d\n", sig);
	for (j = 0; j < nthreads; j++) {
		struct submitter *s = get_submitter(j);

		s->finish = 1;
	}
	finish = 1;
}

static void arm_sig_int(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);
}

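/*
 * Create the ring and map its three regions: the SQ ring (head, tail,
 * flags and index array), the SQE array itself, and the CQ ring with the
 * CQEs inline. The offsets within each mapping come from the
 * io_uring_params the kernel filled in at setup time.
 */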
static int setup_ring(struct submitter *s)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;
	int ret, fd;
	void *ptr;

	memset(&p, 0, sizeof(p));

	if (polled && !do_nop)
		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;
		}
	}

	fd = io_uring_setup(depth, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	s->ring_fd = fd;

	io_uring_probe(fd);

	if (fixedbufs) {
		ret = io_uring_register_buffers(s);
		if (ret < 0) {
			perror("io_uring_register_buffers");
			return 1;
		}
	}

	if (register_files) {
		ret = io_uring_register_files(s);
		if (ret < 0) {
			perror("io_uring_register_files");
			return 1;
		}
	}

	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQ_RING);
	printf("sq_ring ptr = 0x%p\n", ptr);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);
	printf("sqes ptr = 0x%p\n", s->sqes);

	ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	printf("cq_ring ptr = 0x%p\n", ptr);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;
	return 0;
}

static void file_depths(char *buf)
{
	char *p;
	int i, j;

	buf[0] = '\0';
	p = buf;
	for (j = 0; j < nthreads; j++) {
		struct submitter *s = get_submitter(j);

		for (i = 0; i < s->nr_files; i++) {
			struct file *f = &s->files[i];

			if (i + 1 == s->nr_files)
				p += sprintf(p, "%d", f->pending_ios);
			else
				p += sprintf(p, "%d, ", f->pending_ios);
		}
	}
}

static void usage(char *argv)
{
	printf("%s [options] -- [filenames]\n"
		" -d <int>  : IO Depth, default %d\n"
		" -s <int>  : Batch submit, default %d\n"
		" -c <int>  : Batch complete, default %d\n"
		" -b <int>  : Block size, default %d\n"
		" -p <bool> : Polled IO, default %d\n"
		" -B <bool> : Fixed buffers, default %d\n"
		" -F <bool> : Register files, default %d\n"
		" -n <int>  : Number of threads, default %d\n",
		argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
		fixedbufs, register_files, nthreads);
	exit(0);
}
548
549int main(int argc, char *argv[])
550{
551 struct submitter *s;
552 unsigned long done, calls, reap;
553 int err, i, j, flags, fd, opt;
554 char *fdepths;
555 void *ret;
556
557 if (!do_nop && argc < 2) {
558 printf("%s: filename [options]\n", argv[0]);
559 return 1;
560 }
561
562 while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:h?")) != -1) {
563 switch (opt) {
564 case 'd':
565 depth = atoi(optarg);
566 break;
567 case 's':
568 batch_submit = atoi(optarg);
569 break;
570 case 'c':
571 batch_complete = atoi(optarg);
572 break;
573 case 'b':
574 bs = atoi(optarg);
575 break;
576 case 'p':
577 polled = !!atoi(optarg);
578 break;
579 case 'B':
580 fixedbufs = !!atoi(optarg);
581 break;
582 case 'F':
583 register_files = !!atoi(optarg);
584 break;
585 case 'n':
586 nthreads = atoi(optarg);
587 break;
588 case 'h':
589 case '?':
590 default:
591 usage(argv[0]);
592 break;
593 }
594 }
595
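	/*
	 * One contiguous allocation for all threads; the per-thread stride
	 * must match the arithmetic in get_submitter().
	 */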
	submitter = calloc(nthreads, sizeof(*submitter) +
				depth * sizeof(struct iovec));
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		s->index = j;
		s->done = s->calls = s->reaps = 0;
	}

	flags = O_RDONLY | O_NOATIME;
	if (!buffered)
		flags |= O_DIRECT;

	j = 0;
	i = optind;
	printf("i %d, argc %d\n", i, argc);
	while (!do_nop && i < argc) {
		struct file *f;

		s = get_submitter(j);
		if (s->nr_files == MAX_FDS) {
			printf("Max number of files (%d) reached\n", MAX_FDS);
			break;
		}
		fd = open(argv[i], flags);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		f = &s->files[s->nr_files];
		f->real_fd = fd;
		if (get_file_size(f)) {
			printf("failed getting size of device/file\n");
			return 1;
		}
		if (f->max_blocks <= 1) {
			printf("Zero file/device size?\n");
			return 1;
		}
		f->max_blocks--;

		printf("Added file %s (submitter %d)\n", argv[i], s->index);
		s->nr_files++;
		i++;
		if (++j >= nthreads)
			j = 0;
	}

	if (fixedbufs) {
		struct rlimit rlim;

		rlim.rlim_cur = RLIM_INFINITY;
		rlim.rlim_max = RLIM_INFINITY;
		if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
			perror("setrlimit");
			return 1;
		}
	}

	arm_sig_int();

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		for (i = 0; i < depth; i++) {
			void *buf;

			if (posix_memalign(&buf, bs, bs)) {
				printf("failed alloc\n");
				return 1;
			}
			s->iovecs[i].iov_base = buf;
			s->iovecs[i].iov_len = bs;
		}
	}

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);

		err = setup_ring(s);
		if (err) {
			printf("ring setup failed: %s, %d\n", strerror(errno), err);
			return 1;
		}
	}
	s = get_submitter(0);
	printf("polled=%d, fixedbufs=%d, register_files=%d, buffered=%d", polled, fixedbufs, register_files, buffered);
	printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", depth, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_create(&s->thread, NULL, submitter_fn, s);
	}

	fdepths = malloc(8 * s->nr_files * nthreads);
	reap = calls = done = 0;
	do {
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long rpc = 0, ipc = 0;

		sleep(1);
		/* aggregate stats across all submitter threads */
		for (j = 0; j < nthreads; j++) {
			s = get_submitter(j);
			this_done += s->done;
			this_call += s->calls;
			this_reap += s->reaps;
		}
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);
		} else
			rpc = ipc = -1;
		file_depths(fdepths);
		printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s)\n",
				this_done - done, rpc, ipc, s->inflight,
				fdepths);
		done = this_done;
		calls = this_call;
		reap = this_reap;
	} while (!finish);

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_join(s->thread, &ret);
		close(s->ring_fd);
	}
	free(fdepths);
	free(submitter);
	return 0;
}