/*
 * t/io_uring.c: io_uring benchmark/test tool from fio.git
 * (commit subject: "t/io_uring: Reporting bandwidth")
 */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <inttypes.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>

#include "../arch/arch.h"
#include "../lib/types.h"
#include "../os/linux/io_uring.h"

/* parenthesize the arguments so expressions like min(a - b, c) expand safely */
#define min(a, b)		((a) < (b) ? (a) : (b))

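/*
 * Mapped pointers into the shared SQ and CQ ring memory. Each field points
 * at the offset within the mmap'ed regions that io_uring_setup() reports
 * in struct io_uring_params.
 */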
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

#define DEPTH			128
#define BATCH_SUBMIT		32
#define BATCH_COMPLETE		32
#define BS			4096

#define MAX_FDS			16

static unsigned sq_ring_mask, cq_ring_mask;

struct file {
	unsigned long max_blocks;
	unsigned pending_ios;
	int real_fd;
	int fixed_fd;
};

struct submitter {
	pthread_t thread;
	int ring_fd;
	int index;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct io_cq_ring cq_ring;
	int inflight;
	unsigned long reaps;
	unsigned long done;
	unsigned long calls;
	volatile int finish;

	__s32 *fds;

	struct file files[MAX_FDS];
	unsigned nr_files;
	unsigned cur_file;
	struct iovec iovecs[];
};

static struct submitter *submitter;
static volatile int finish;

static int depth = DEPTH;
static int batch_submit = BATCH_SUBMIT;
static int batch_complete = BATCH_COMPLETE;
static int bs = BS;
static int polled = 1;		/* use IO polling */
static int fixedbufs = 1;	/* use fixed user buffers */
static int register_files = 1;	/* use fixed files */
static int buffered = 0;	/* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;	/* use kernel submission/poller thread */
static int sq_thread_cpu = -1;	/* pin above thread to this CPU */
static int do_nop = 0;		/* no-op SQ ring commands */
static int nthreads = 1;

static int vectored = 1;

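/*
 * Register the pre-allocated buffers (and, below, the open file
 * descriptors) with the ring, so the kernel avoids the per-I/O page
 * pinning and fget/fput costs. These pair with IORING_OP_READ_FIXED
 * and IOSQE_FIXED_FILE in init_io().
 */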
static int io_uring_register_buffers(struct submitter *s)
{
	if (do_nop)
		return 0;

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_BUFFERS, s->iovecs, depth);
}

static int io_uring_register_files(struct submitter *s)
{
	int i;

	if (do_nop)
		return 0;

	s->fds = calloc(s->nr_files, sizeof(__s32));
	for (i = 0; i < s->nr_files; i++) {
		s->fds[i] = s->files[i].real_fd;
		s->files[i].fixed_fd = i;
	}

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_FILES, s->fds, s->nr_files);
}

static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
	return syscall(__NR_io_uring_setup, entries, p);
}

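/*
 * Probe the ring for supported opcodes. If the kernel supports the
 * non-vectored IORING_OP_READ, prefer it over IORING_OP_READV, since
 * every request here carries exactly one buffer anyway.
 */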
static void io_uring_probe(int fd)
{
	struct io_uring_probe *p;
	int ret;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_READ > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
		vectored = 0;
out:
	free(p);
}

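/* Thin wrapper around the io_uring_enter(2) syscall; no sigmask is passed. */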
static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	return syscall(__NR_io_uring_enter, s->ring_fd, to_submit, min_complete,
			flags, NULL, 0);
}

#ifndef CONFIG_HAVE_GETTID
static int gettid(void)
{
	return syscall(__NR_gettid);
}
#endif

static unsigned file_depth(struct submitter *s)
{
	return (depth + s->nr_files - 1) / s->nr_files;
}

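/*
 * Prepare one SQE: pick a file (round-robin once the current file has
 * its share of the queue depth in flight), choose a random block-aligned
 * offset, and fill in the opcode according to the fixed-buffer and
 * vectored settings. The CQE's user_data carries the file pointer back.
 */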
static void init_io(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index];
	unsigned long offset;
	struct file *f;
	long r;

	if (do_nop) {
		sqe->opcode = IORING_OP_NOP;
		return;
	}

	if (s->nr_files == 1) {
		f = &s->files[0];
	} else {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			s->cur_file++;
			if (s->cur_file == s->nr_files)
				s->cur_file = 0;
			f = &s->files[s->cur_file];
		}
	}
	f->pending_ios++;

	r = lrand48();
	offset = (r % (f->max_blocks - 1)) * bs;

	if (register_files) {
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->fixed_fd;
	} else {
		sqe->flags = 0;
		sqe->fd = f->real_fd;
	}
	if (fixedbufs) {
		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = index;
	} else if (!vectored) {
		sqe->opcode = IORING_OP_READ;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = 0;
	} else {
		sqe->opcode = IORING_OP_READV;
		sqe->addr = (unsigned long) &s->iovecs[index];
		sqe->len = 1;
		sqe->buf_index = 0;
	}
	sqe->ioprio = 0;
	sqe->off = offset;
	sqe->user_data = (unsigned long) f;
}

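/*
 * Fill up to max_ios new SQEs at the current SQ tail, stopping early if
 * the ring is full. The tail is published with a release store so the
 * kernel only sees fully written entries.
 */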
static int prep_more_ios(struct submitter *s, int max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		if (next_tail == atomic_load_acquire(ring->head))
			break;

		index = tail & sq_ring_mask;
		init_io(s, index);
		ring->array[index] = index;
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (prepped)
		atomic_store_release(ring->tail, tail);
	return prepped;
}

static int get_file_size(struct file *f)
{
	struct stat st;

	if (fstat(f->real_fd, &st) < 0)
		return -1;
	if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
			return -1;

		f->max_blocks = bytes / bs;
		return 0;
	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / bs;
		return 0;
	}

	return -1;
}

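/*
 * Drain completions from the CQ ring. Each CQE's user_data holds the
 * struct file pointer set in init_io(); any result other than a full
 * block read is treated as an error. The head is advanced with a
 * release store once the reaped entries have been consumed.
 */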
static int reap_events(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		struct file *f;

		read_barrier();
		if (head == atomic_load_acquire(ring->tail))
			break;
		cqe = &ring->cqes[head & cq_ring_mask];
		if (!do_nop) {
			f = (struct file *) (uintptr_t) cqe->user_data;
			f->pending_ios--;
			if (cqe->res != bs) {
				printf("io: unexpected ret=%d\n", cqe->res);
				if (polled && cqe->res == -EOPNOTSUPP)
					printf("Your filesystem/driver/kernel doesn't support polled IO\n");
				return -1;
			}
		}
		reaped++;
		head++;
	} while (1);

	if (reaped) {
		s->inflight -= reaped;
		atomic_store_release(ring->head, head);
	}
	return reaped;
}

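/*
 * Per-thread main loop: prepare a batch of SQEs, submit them through
 * io_uring_enter() (or let the SQPOLL thread pick them up, waking it
 * only when IORING_SQ_NEED_WAKEUP is set), then reap completions until
 * the batch target is met.
 */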
static void *submitter_fn(void *data)
{
	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;
	int ret, prepped;

	printf("submitter=%d\n", gettid());

	srand48(pthread_self());

	prepped = 0;
	do {
		int to_wait, to_submit, this_reap, to_prep;
		unsigned ring_flags = 0;

		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios(s, to_prep);
		}
		s->inflight += prepped;
submit_more:
		to_submit = prepped;
submit:
		if (to_submit && (s->inflight + to_submit <= depth))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, batch_complete);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		if (sq_thread_poll)
			ring_flags = atomic_load_acquire(ring->flags);
		if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
			unsigned flags = 0;

			if (to_wait)
				flags = IORING_ENTER_GETEVENTS;
			if (ring_flags & IORING_SQ_NEED_WAKEUP)
				flags |= IORING_ENTER_SQ_WAKEUP;
			ret = io_uring_enter(s, to_submit, to_wait, flags);
			s->calls++;
		} else {
			/* for SQPOLL, we submitted it all effectively */
			ret = to_submit;
		}

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */
		this_reap = 0;
		do {
			int r;

			r = reap_events(s);
			if (r == -1) {
				s->finish = 1;
				break;
			} else if (r > 0)
				this_reap += r;
		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

		if (ret >= 0) {
			if (!ret) {
				to_submit = 0;
				if (s->inflight)
					goto submit;
				continue;
			} else if (ret < to_submit) {
				int diff = to_submit - ret;

				s->done += ret;
				prepped -= diff;
				goto submit_more;
			}
			s->done += ret;
			prepped = 0;
			continue;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
				if (s->finish)
					break;
				if (this_reap)
					goto submit;
				to_submit = 0;
				goto submit;
			}
			printf("io_submit: %s\n", strerror(errno));
			break;
		}
	} while (!s->finish);

	finish = 1;
	return NULL;
}

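/*
 * The submitters live in one contiguous allocation; each element is
 * sizeof(*submitter) plus the trailing per-entry iovec array, so the
 * offset must be computed by hand rather than with pointer arithmetic
 * on the struct type (sizeof doesn't cover the flexible array member).
 */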
static struct submitter *get_submitter(int offset)
{
	void *ret;

	ret = submitter;
	if (offset)
		ret += offset * (sizeof(*submitter) + depth * sizeof(struct iovec));
	return ret;
}

static void sig_int(int sig)
{
	int j;

	printf("Exiting on signal %d\n", sig);
	for (j = 0; j < nthreads; j++) {
		struct submitter *s = get_submitter(j);

		s->finish = 1;
	}
	finish = 1;
}

static void arm_sig_int(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);
}

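/*
 * Create the ring and mmap its three regions: the SQ ring (head/tail
 * indices plus the array of SQE indices), the SQE array itself, and the
 * CQ ring with its CQEs. Buffer/file registration happens here too,
 * raising RLIMIT_MEMLOCK first since registered buffers are accounted
 * as locked memory on older kernels.
 */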
static int setup_ring(struct submitter *s)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;
	int ret, fd;
	void *ptr;

	memset(&p, 0, sizeof(p));

	if (polled && !do_nop)
		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;
		}
	}

	fd = io_uring_setup(depth, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	s->ring_fd = fd;

	io_uring_probe(fd);

	if (fixedbufs) {
		struct rlimit rlim;

		rlim.rlim_cur = RLIM_INFINITY;
		rlim.rlim_max = RLIM_INFINITY;
		/* ignore potential error, not needed on newer kernels */
		setrlimit(RLIMIT_MEMLOCK, &rlim);

		ret = io_uring_register_buffers(s);
		if (ret < 0) {
			perror("io_uring_register_buffers");
			return 1;
		}
	}

	if (register_files) {
		ret = io_uring_register_files(s);
		if (ret < 0) {
			perror("io_uring_register_files");
			return 1;
		}
	}

	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQ_RING);
	printf("sq_ring ptr = %p\n", ptr);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);
	printf("sqes ptr    = %p\n", s->sqes);

	ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	printf("cq_ring ptr = %p\n", ptr);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;
	return 0;
}

static void file_depths(char *buf)
{
	bool prev = false;
	char *p;
	int i, j;

	buf[0] = '\0';
	p = buf;
	for (j = 0; j < nthreads; j++) {
		struct submitter *s = get_submitter(j);

		for (i = 0; i < s->nr_files; i++) {
			struct file *f = &s->files[i];

			if (prev)
				p += sprintf(p, " %d", f->pending_ios);
			else
				p += sprintf(p, "%d", f->pending_ios);
			prev = true;
		}
	}
}

static void usage(char *argv, int status)
{
	printf("%s [options] -- [filenames]\n"
		" -d <int>  : IO Depth, default %d\n"
		" -s <int>  : Batch submit, default %d\n"
		" -c <int>  : Batch complete, default %d\n"
		" -b <int>  : Block size, default %d\n"
		" -p <bool> : Polled IO, default %d\n"
		" -B <bool> : Fixed buffers, default %d\n"
		" -F <bool> : Register files, default %d\n"
		" -n <int>  : Number of threads, default %d\n"
		" -O <bool> : Use O_DIRECT, default %d\n"
		" -N <bool> : Perform just no-op requests, default %d\n",
		argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
		fixedbufs, register_files, nthreads, !buffered, do_nop);
	exit(status);
}
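
/*
 * Example invocation (the device path is just an illustration):
 *
 *   ./io_uring -d 128 -s 32 -c 32 -p 1 -B 1 -F 1 /dev/nvme0n1
 *
 * i.e. QD 128, submit/complete batches of 32, polled I/O with fixed
 * buffers and registered files against a single device.
 */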
int main(int argc, char *argv[])
{
	struct submitter *s;
	unsigned long done, calls, reap;
	int err, i, j, flags, fd, opt, threads_per_f, threads_rem = 0, nfiles;
	struct file f;
	char *fdepths;
	void *ret;

	if (!do_nop && argc < 2)
		usage(argv[0], 1);

	while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:h?")) != -1) {
		switch (opt) {
		case 'd':
			depth = atoi(optarg);
			break;
		case 's':
			batch_submit = atoi(optarg);
			break;
		case 'c':
			batch_complete = atoi(optarg);
			break;
		case 'b':
			bs = atoi(optarg);
			break;
		case 'p':
			polled = !!atoi(optarg);
			break;
		case 'B':
			fixedbufs = !!atoi(optarg);
			break;
		case 'F':
			register_files = !!atoi(optarg);
			break;
		case 'n':
			nthreads = atoi(optarg);
			if (!nthreads) {
				printf("Threads must be non-zero\n");
				usage(argv[0], 1);
			}
			break;
		case 'N':
			do_nop = !!atoi(optarg);
			break;
		case 'O':
			buffered = !atoi(optarg);
			break;
		case 'h':
		case '?':
		default:
			usage(argv[0], 0);
			break;
		}
	}

	submitter = calloc(nthreads, sizeof(*submitter) +
				depth * sizeof(struct iovec));
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		s->index = j;
		s->done = s->calls = s->reaps = 0;
	}

	flags = O_RDONLY | O_NOATIME;
	if (!buffered)
		flags |= O_DIRECT;

	j = 0;
	i = optind;
	nfiles = argc - i;
	if (!do_nop) {
		if (!nfiles) {
			printf("No files specified\n");
			usage(argv[0], 1);
		}
		threads_per_f = nthreads / nfiles;
		/* make sure each thread gets assigned files */
		if (threads_per_f == 0) {
			threads_per_f = 1;
		} else {
			threads_rem = nthreads - threads_per_f * nfiles;
		}
	}
	while (!do_nop && i < argc) {
		int k, limit;

		memset(&f, 0, sizeof(f));

		fd = open(argv[i], flags);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		f.real_fd = fd;
		if (get_file_size(&f)) {
			printf("failed getting size of device/file\n");
			return 1;
		}
		if (f.max_blocks <= 1) {
			printf("Zero file/device size?\n");
			return 1;
		}
		f.max_blocks--;

		limit = threads_per_f;
		limit += threads_rem > 0 ? 1 : 0;
		for (k = 0; k < limit; k++) {
			s = get_submitter((j + k) % nthreads);

			if (s->nr_files == MAX_FDS) {
				printf("Max number of files (%d) reached\n", MAX_FDS);
				break;
			}

			memcpy(&s->files[s->nr_files], &f, sizeof(f));

			printf("Added file %s (submitter %d)\n", argv[i], s->index);
			s->nr_files++;
		}
		threads_rem--;
		i++;
		j += limit;
	}

	arm_sig_int();

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		for (i = 0; i < depth; i++) {
			void *buf;

			if (posix_memalign(&buf, bs, bs)) {
				printf("failed alloc\n");
				return 1;
			}
			s->iovecs[i].iov_base = buf;
			s->iovecs[i].iov_len = bs;
		}
	}

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);

		err = setup_ring(s);
		if (err) {
			printf("ring setup failed: %s, %d\n", strerror(errno), err);
			return 1;
		}
	}
	s = get_submitter(0);
	printf("polled=%d, fixedbufs=%d, register_files=%d, buffered=%d",
		polled, fixedbufs, register_files, buffered);
	printf(" QD=%d, sq_ring=%d, cq_ring=%d\n",
		depth, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_create(&s->thread, NULL, submitter_fn, s);
	}

	fdepths = malloc(8 * s->nr_files * nthreads);
	reap = calls = done = 0;
	do {
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long rpc = 0, ipc = 0;
		unsigned long iops;

		sleep(1);
		for (j = 0; j < nthreads; j++) {
			s = get_submitter(j);
			this_done += s->done;
			this_call += s->calls;
			this_reap += s->reaps;
		}
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);
		} else
			rpc = ipc = -1;
		file_depths(fdepths);
		iops = this_done - done;
		printf("IOPS=%lu, BW=%luMiB/s, IOS/call=%ld/%ld, inflight=(%s)\n",
			iops, iops * bs / 1048576, rpc, ipc, fdepths);
		done = this_done;
		calls = this_call;
		reap = this_reap;
	} while (!finish);

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_join(s->thread, &ret);
		close(s->ring_fd);
	}
	free(fdepths);
	free(submitter);
	return 0;
}