#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <inttypes.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <sched.h>

#include "../arch/arch.h"
#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#define min(a, b)		(((a) < (b)) ? (a) : (b))
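
/*
 * Userspace views of the mmap()ed SQ and CQ rings; each pointer lands
 * on a field of the shared ring header mapped in setup_ring().
 */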
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};
#define DEPTH			128
#define BATCH_SUBMIT		32
#define BATCH_COMPLETE		32
#define BS			4096

#define MAX_FDS			16
static unsigned sq_ring_mask, cq_ring_mask;
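
/*
 * Cached ring masks, valid for every thread since all rings are
 * created with the same depth.
 */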
struct file {
	unsigned long max_blocks;
	unsigned pending_ios;
	int real_fd;
	int fixed_fd;
};
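
/*
 * Per-thread state: ring mappings, stats, the files this thread owns,
 * and a trailing iovec array sized to the queue depth.
 */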
struct submitter {
	pthread_t thread;
	int ring_fd;
	int index;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct io_cq_ring cq_ring;
	int inflight;
	unsigned long reaps;
	unsigned long done;
	unsigned long calls;
	volatile int finish;

	__s32 *fds;

	struct file files[MAX_FDS];
	unsigned nr_files;
	unsigned cur_file;
	struct iovec iovecs[];
};
static struct submitter *submitter;
static volatile int finish;

static int depth = DEPTH;
static int batch_submit = BATCH_SUBMIT;
static int batch_complete = BATCH_COMPLETE;
static int bs = BS;
static int polled = 1;		/* use IO polling */
static int fixedbufs = 1;	/* use fixed user buffers */
static int register_files = 1;	/* use fixed files */
static int buffered = 0;	/* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;	/* use kernel submission/poller thread */
static int sq_thread_cpu = -1;	/* pin above thread to this CPU */
static int do_nop = 0;		/* no-op SQ ring commands */
static int nthreads = 1;

static int vectored = 1;
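
/*
 * Pin the IO buffers up front so IORING_OP_READ_FIXED can skip the
 * per-IO page pinning that plain reads pay for.
 */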
static int io_uring_register_buffers(struct submitter *s)
{
	if (do_nop)
		return 0;

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_BUFFERS, s->iovecs, depth);
}
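
/*
 * Register the real fds so SQEs can reference files by fixed index
 * (IOSQE_FIXED_FILE), avoiding an fd lookup on every submission.
 */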
static int io_uring_register_files(struct submitter *s)
{
	int i;

	if (do_nop)
		return 0;

	s->fds = calloc(s->nr_files, sizeof(__s32));
	for (i = 0; i < s->nr_files; i++) {
		s->fds[i] = s->files[i].real_fd;
		s->files[i].fixed_fd = i;
	}

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_FILES, s->fds, s->nr_files);
}
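
/* Raw syscall wrapper; glibc provides no io_uring stubs. */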
static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
	return syscall(__NR_io_uring_setup, entries, p);
}
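
/*
 * Probe the ring's supported opcodes: if this kernel has the plain
 * (non-vectored) IORING_OP_READ, drop back from READV to READ.
 */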
static void io_uring_probe(int fd)
{
	struct io_uring_probe *p;
	int ret;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_READ > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
		vectored = 0;
out:
	free(p);
}
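
/*
 * Submit and/or wait for completions in one syscall; flags select
 * IORING_ENTER_GETEVENTS and the SQPOLL wakeup.
 */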
static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	return syscall(__NR_io_uring_enter, s->ring_fd, to_submit, min_complete,
			flags, NULL, 0);
}
#ifndef CONFIG_HAVE_GETTID
static int gettid(void)
{
	return syscall(__NR_gettid);
}
#endif
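
/*
 * Each file's share of the queue depth, rounded up so the shares
 * always cover the full depth.
 */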
static unsigned file_depth(struct submitter *s)
{
	return (depth + s->nr_files - 1) / s->nr_files;
}
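
/*
 * Fill the SQE at 'index' with a random read. Files rotate round robin
 * once one accumulates its file_depth() share of pending IOs, and the
 * opcode depends on the mode: READ_FIXED with registered buffers,
 * READ where supported, READV otherwise.
 */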
static void init_io(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index];
	unsigned long offset;
	struct file *f;
	long r;

	if (do_nop) {
		sqe->opcode = IORING_OP_NOP;
		return;
	}

	if (s->nr_files == 1) {
		f = &s->files[0];
	} else {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			s->cur_file++;
			if (s->cur_file == s->nr_files)
				s->cur_file = 0;
			f = &s->files[s->cur_file];
		}
	}
	f->pending_ios++;

	r = lrand48();
	offset = (r % (f->max_blocks - 1)) * bs;

	if (register_files) {
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->fixed_fd;
	} else {
		sqe->flags = 0;
		sqe->fd = f->real_fd;
	}
	if (fixedbufs) {
		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = index;
	} else if (!vectored) {
		sqe->opcode = IORING_OP_READ;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = 0;
	} else {
		sqe->opcode = IORING_OP_READV;
		sqe->addr = (unsigned long) &s->iovecs[index];
		sqe->len = 1;
		sqe->buf_index = 0;
	}
	sqe->ioprio = 0;
	sqe->off = offset;
	sqe->user_data = (unsigned long) f;
}
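
/*
 * Prepare up to max_ios SQEs, stopping early if the SQ ring fills up.
 * Only the final tail update is published (store-release); the kernel
 * pairs it with an acquire load.
 */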
static int prep_more_ios(struct submitter *s, int max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		if (next_tail == atomic_load_acquire(ring->head))
			break;

		index = tail & sq_ring_mask;
		init_io(s, index);
		ring->array[index] = index;
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (prepped)
		atomic_store_release(ring->tail, tail);
	return prepped;
}
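
/*
 * Size the target in block-size units: BLKGETSIZE64 for block devices,
 * st_size for regular files.
 */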
static int get_file_size(struct file *f)
{
	struct stat st;

	if (fstat(f->real_fd, &st) < 0)
		return -1;
	if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
			return -1;

		f->max_blocks = bytes / bs;
		return 0;
	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / bs;
		return 0;
	}

	return -1;
}
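
/*
 * Drain the CQ ring. user_data carries the file pointer stashed at
 * submit time; any result other than a full block is treated as fatal.
 */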
static int reap_events(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		struct file *f;

		read_barrier();
		if (head == atomic_load_acquire(ring->tail))
			break;
		cqe = &ring->cqes[head & cq_ring_mask];
		if (!do_nop) {
			f = (struct file *) (uintptr_t) cqe->user_data;
			f->pending_ios--;
			if (cqe->res != bs) {
				printf("io: unexpected ret=%d\n", cqe->res);
				if (polled && cqe->res == -EOPNOTSUPP)
					printf("Your filesystem/driver/kernel doesn't support polled IO\n");
				return -1;
			}
		}
		reaped++;
		head++;
	} while (1);

	if (reaped) {
		s->inflight -= reaped;
		atomic_store_release(ring->head, head);
	}
	return reaped;
}
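
/*
 * Per-thread main loop: top up the SQ ring, enter the kernel unless
 * the SQPOLL thread is awake and submitting for us, then reap and
 * account completions.
 */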
static void *submitter_fn(void *data)
{
	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;
	int ret, prepped;

	printf("submitter=%d\n", gettid());

	srand48(pthread_self());

	prepped = 0;
	do {
		int to_wait, to_submit, this_reap, to_prep;
		unsigned ring_flags = 0;

		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios(s, to_prep);
		}
		s->inflight += prepped;
submit_more:
		to_submit = prepped;
submit:
		if (to_submit && (s->inflight + to_submit <= depth))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, batch_complete);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		if (sq_thread_poll)
			ring_flags = atomic_load_acquire(ring->flags);
		if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
			unsigned flags = 0;

			if (to_wait)
				flags = IORING_ENTER_GETEVENTS;
			if (ring_flags & IORING_SQ_NEED_WAKEUP)
				flags |= IORING_ENTER_SQ_WAKEUP;
			ret = io_uring_enter(s, to_submit, to_wait, flags);
			s->calls++;
		} else {
			/* for SQPOLL, we submitted it all effectively */
			ret = to_submit;
		}

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */
		this_reap = 0;
		do {
			int r;

			r = reap_events(s);
			if (r == -1) {
				s->finish = 1;
				break;
			} else if (r > 0)
				this_reap += r;
		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

		if (ret >= 0) {
			if (!ret) {
				to_submit = 0;
				if (s->inflight)
					goto submit;
				continue;
			} else if (ret < to_submit) {
				int diff = to_submit - ret;

				s->done += ret;
				prepped -= diff;
				goto submit_more;
			}
			s->done += ret;
			prepped = 0;
			continue;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
				if (s->finish)
					break;
				if (this_reap)
					goto submit;
				to_submit = 0;
				goto submit;
			}
			printf("io_submit: %s\n", strerror(errno));
			break;
		}
	} while (!s->finish);

	finish = 1;
	return NULL;
}
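
/*
 * Submitters live in one flat calloc()ed block; step over
 * sizeof(*submitter) plus the trailing iovec array per index.
 */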
static struct submitter *get_submitter(int offset)
{
	void *ret;

	ret = submitter;
	if (offset)
		ret += offset * (sizeof(*submitter) + depth * sizeof(struct iovec));
	return ret;
}
static void sig_int(int sig)
{
	int j;

	printf("Exiting on signal %d\n", sig);
	for (j = 0; j < nthreads; j++) {
		struct submitter *s = get_submitter(j);

		s->finish = 1;
	}
	finish = 1;
}
static void arm_sig_int(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);
}
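
/*
 * Create the ring and mmap() its three regions: the SQ header plus
 * index array, the SQE array, and the CQ header with its CQEs. Buffer
 * and file registration also happen here, before any IO is issued.
 */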
static int setup_ring(struct submitter *s)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;
	int ret, fd;
	void *ptr;

	memset(&p, 0, sizeof(p));

	if (polled && !do_nop)
		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;
		}
	}

	fd = io_uring_setup(depth, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	s->ring_fd = fd;

	io_uring_probe(fd);

	if (fixedbufs) {
		ret = io_uring_register_buffers(s);
		if (ret < 0) {
			perror("io_uring_register_buffers");
			return 1;
		}
	}

	if (register_files) {
		ret = io_uring_register_files(s);
		if (ret < 0) {
			perror("io_uring_register_files");
			return 1;
		}
	}

	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQ_RING);
	printf("sq_ring ptr = %p\n", ptr);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);
	printf("sqes ptr    = %p\n", s->sqes);

	ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	printf("cq_ring ptr = %p\n", ptr);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;
	return 0;
}
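
/*
 * Render every file's pending-IO count into buf as a space-separated
 * list for the once-a-second stats line.
 */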
static void file_depths(char *buf)
{
	bool prev = false;
	char *p;
	int i, j;

	buf[0] = '\0';
	p = buf;
	for (j = 0; j < nthreads; j++) {
		struct submitter *s = get_submitter(j);

		for (i = 0; i < s->nr_files; i++) {
			struct file *f = &s->files[i];

			if (prev)
				p += sprintf(p, " %d", f->pending_ios);
			else
				p += sprintf(p, "%d", f->pending_ios);
			prev = true;
		}
	}
}
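
/* Keep this in sync with the getopt string in main(). */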
static void usage(char *argv)
{
	printf("%s [options] -- [filenames]\n"
		" -d <int> : IO Depth, default %d\n"
		" -s <int> : Batch submit, default %d\n"
		" -c <int> : Batch complete, default %d\n"
		" -b <int> : Block size, default %d\n"
		" -p <bool> : Polled IO, default %d\n"
		" -B <bool> : Fixed buffers, default %d\n"
		" -F <bool> : Register files, default %d\n"
		" -n <int> : Number of threads, default %d\n",
		argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
		fixedbufs, register_files, nthreads);
	exit(0);
}
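
/*
 * Order matters below: parse options, spread files round robin across
 * submitters, raise RLIMIT_MEMLOCK (registered buffers count as locked
 * memory), allocate aligned buffers, set up the rings, then run the
 * threads and print stats once a second until interrupted.
 */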
int main(int argc, char *argv[])
{
	struct submitter *s;
	unsigned long done, calls, reap;
	int err, i, j, flags, fd, opt;
	char *fdepths;
	void *ret;

	if (!do_nop && argc < 2) {
		printf("%s: filename [options]\n", argv[0]);
		return 1;
	}

	while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:h?")) != -1) {
		switch (opt) {
		case 'd':
			depth = atoi(optarg);
			break;
		case 's':
			batch_submit = atoi(optarg);
			break;
		case 'c':
			batch_complete = atoi(optarg);
			break;
		case 'b':
			bs = atoi(optarg);
			break;
		case 'p':
			polled = !!atoi(optarg);
			break;
		case 'B':
			fixedbufs = !!atoi(optarg);
			break;
		case 'F':
			register_files = !!atoi(optarg);
			break;
		case 'n':
			nthreads = atoi(optarg);
			break;
		case 'h':
		case '?':
		default:
			usage(argv[0]);
			break;
		}
	}
	submitter = calloc(nthreads, sizeof(*submitter) +
				depth * sizeof(struct iovec));
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		s->index = j;
		s->done = s->calls = s->reaps = 0;
	}

	flags = O_RDONLY | O_NOATIME;
	if (!buffered)
		flags |= O_DIRECT;

	j = 0;
	i = optind;
	printf("i %d, argc %d\n", i, argc);
	while (!do_nop && i < argc) {
		struct file *f;

		s = get_submitter(j);
		if (s->nr_files == MAX_FDS) {
			printf("Max number of files (%d) reached\n", MAX_FDS);
			break;
		}
		fd = open(argv[i], flags);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		f = &s->files[s->nr_files];
		f->real_fd = fd;
		if (get_file_size(f)) {
			printf("failed getting size of device/file\n");
			return 1;
		}
		if (f->max_blocks <= 1) {
			printf("Zero file/device size?\n");
			return 1;
		}
		f->max_blocks--;

		printf("Added file %s (submitter %d)\n", argv[i], s->index);
		s->nr_files++;
		i++;
		j++;
		if (j >= nthreads)
			j = 0;
	}

	arm_sig_int();

	if (fixedbufs) {
		struct rlimit rlim;

		rlim.rlim_cur = RLIM_INFINITY;
		rlim.rlim_max = RLIM_INFINITY;
		if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
			perror("setrlimit");
			return 1;
		}
	}
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		for (i = 0; i < depth; i++) {
			void *buf;

			if (posix_memalign(&buf, bs, bs)) {
				printf("failed alloc\n");
				return 1;
			}
			s->iovecs[i].iov_base = buf;
			s->iovecs[i].iov_len = bs;
		}
	}

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);

		err = setup_ring(s);
		if (err) {
			printf("ring setup failed: %s, %d\n", strerror(errno), err);
			return 1;
		}
	}
	s = get_submitter(0);
	printf("polled=%d, fixedbufs=%d, register_files=%d, buffered=%d",
		polled, fixedbufs, register_files, buffered);
	printf(" QD=%d, sq_ring=%d, cq_ring=%d\n",
		depth, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_create(&s->thread, NULL, submitter_fn, s);
	}

	fdepths = malloc(8 * s->nr_files * nthreads);
	reap = calls = done = 0;
	do {
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long rpc = 0, ipc = 0;

		sleep(1);
		for (j = 0; j < nthreads; j++) {
			s = get_submitter(j);
			this_done += s->done;
			this_call += s->calls;
			this_reap += s->reaps;
		}
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);
		} else
			rpc = ipc = -1;
		file_depths(fdepths);
		printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=(%s)\n",
				this_done - done, rpc, ipc, fdepths);
		done = this_done;
		calls = this_call;
		reap = this_reap;
	} while (!finish);
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_join(s->thread, &ret);
		close(s->ring_fd);
	}
	free(fdepths);
	return 0;
}