#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <linux/fs.h>

#include "../arch/arch.h"
#include "../lib/types.h"
#include "../os/linux/io_uring.h"

#define min(a, b)               (((a) < (b)) ? (a) : (b))
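
/*
 * App-side views of the SQ and CQ rings: each member points into the
 * regions mmap'ed from the ring fd in setup_ring().
 */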
struct io_sq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        unsigned *flags;
        unsigned *array;
};

struct io_cq_ring {
        unsigned *head;
        unsigned *tail;
        unsigned *ring_mask;
        unsigned *ring_entries;
        struct io_uring_cqe *cqes;
};
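
/* queue depth, per-syscall submit/complete batch sizes, block size, max files */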
#define DEPTH                   128
#define BATCH_SUBMIT            32
#define BATCH_COMPLETE          32

#define BS                      4096

#define MAX_FDS                 16

static unsigned sq_ring_mask, cq_ring_mask;
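
/* per-file state, and per-submitter-thread state */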
struct file {
        unsigned long max_blocks;
        unsigned pending_ios;
        int real_fd;
        int fixed_fd;
};

struct submitter {
        pthread_t thread;
        int ring_fd;
        struct drand48_data rand;
        struct io_sq_ring sq_ring;
        struct io_uring_sqe *sqes;
        struct iovec iovecs[DEPTH];
        struct io_cq_ring cq_ring;
        int inflight;
        unsigned long reaps;
        unsigned long done;
        unsigned long calls;
        unsigned long cachehit, cachemiss;
        volatile int finish;
        __s32 *fds;
        struct file files[MAX_FDS];
        unsigned nr_files;
        unsigned cur_file;
};

static struct submitter submitters[1];
static volatile int finish;

static int polled = 1;          /* use IO polling */
static int fixedbufs = 1;       /* use fixed user buffers */
static int register_files = 1;  /* use fixed files */
static int buffered = 0;        /* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;  /* use kernel submission/poller thread */
static int sq_thread_cpu = -1;  /* pin above thread to this CPU */
static int do_nop = 0;          /* no-op SQ ring commands */
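
/*
 * Register the preallocated buffers with the kernel up front, so
 * IORING_OP_READ_FIXED avoids per-IO page pinning.
 */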
static int io_uring_register_buffers(struct submitter *s)
{
        struct io_uring_register_buffers reg = {
                .iovecs = s->iovecs,
                .nr_iovecs = DEPTH
        };

        return syscall(__NR_sys_io_uring_register, s->ring_fd,
                        IORING_REGISTER_BUFFERS, &reg);
}
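
/*
 * Register the open fds with the kernel; SQEs can then reference a file
 * by fixed index (IOSQE_FIXED_FILE) instead of by descriptor.
 */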
static int io_uring_register_files(struct submitter *s)
{
        struct io_uring_register_files reg;
        int i;

        s->fds = calloc(s->nr_files, sizeof(__s32));
        for (i = 0; i < s->nr_files; i++) {
                s->fds[i] = s->files[i].real_fd;
                s->files[i].fixed_fd = i;
        }
        reg.fds = s->fds;
        reg.nr_fds = s->nr_files;

        return syscall(__NR_sys_io_uring_register, s->ring_fd,
                        IORING_REGISTER_FILES, &reg);
}
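
/* thin wrappers around the raw syscalls; this tool does not use liburing */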
static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
        return syscall(__NR_sys_io_uring_setup, entries, p);
}

static int io_uring_enter(struct submitter *s, unsigned int to_submit,
                          unsigned int min_complete, unsigned int flags)
{
        return syscall(__NR_sys_io_uring_enter, s->ring_fd, to_submit,
                        min_complete, flags);
}

static int gettid(void)
{
        return syscall(__NR_gettid);
}
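
/* each file's fair share of the queue depth, rounded up */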
static unsigned file_depth(struct submitter *s)
{
        return (DEPTH + s->nr_files - 1) / s->nr_files;
}
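
/*
 * Prepare the SQE at 'index': a random BS-sized read from the current file,
 * moving to the next file once this one holds its file_depth() share.
 */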
static void init_io(struct submitter *s, unsigned index)
{
        struct io_uring_sqe *sqe = &s->sqes[index];
        unsigned long offset;
        struct file *f;
        long r;

        if (do_nop) {
                sqe->opcode = IORING_OP_NOP;
                return;
        }

        if (s->nr_files == 1) {
                f = &s->files[0];
        } else {
                f = &s->files[s->cur_file];
                if (f->pending_ios >= file_depth(s)) {
                        s->cur_file++;
                        if (s->cur_file == s->nr_files)
                                s->cur_file = 0;
                        f = &s->files[s->cur_file];
                }
        }
        f->pending_ios++;

        lrand48_r(&s->rand, &r);
        offset = (r % (f->max_blocks - 1)) * BS;

        if (register_files) {
                sqe->flags = IOSQE_FIXED_FILE;
                sqe->fd = f->fixed_fd;
        } else {
                sqe->flags = 0;
                sqe->fd = f->real_fd;
        }
        if (fixedbufs) {
                sqe->opcode = IORING_OP_READ_FIXED;
                sqe->addr = (unsigned long) s->iovecs[index].iov_base;
                sqe->len = BS;
                sqe->buf_index = index;
        } else {
                sqe->opcode = IORING_OP_READV;
                sqe->addr = (unsigned long) &s->iovecs[index];
                sqe->len = 1;
                sqe->buf_index = 0;
        }
        sqe->off = offset;
        sqe->user_data = (unsigned long) f;
}
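
/*
 * Fill up to max_ios new SQEs, stopping early if the SQ ring is full, then
 * publish them to the kernel by advancing the ring tail.
 */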
static int prep_more_ios(struct submitter *s, int max_ios)
{
        struct io_sq_ring *ring = &s->sq_ring;
        unsigned index, tail, next_tail, prepped = 0;

        next_tail = tail = *ring->tail;
        do {
                next_tail++;
                read_barrier();
                if (next_tail == *ring->head)
                        break;

                index = tail & sq_ring_mask;
                init_io(s, index);
                ring->array[index] = index;
                prepped++;
                tail = next_tail;
        } while (prepped < max_ios);

        if (*ring->tail != tail) {
                /* order tail store with writes to sqes above */
                write_barrier();
                *ring->tail = tail;
                write_barrier();
        }
        return prepped;
}
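
/* size the file in BS blocks: BLKGETSIZE64 for block devices, st_size for regular files */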
static int get_file_size(struct file *f)
{
        struct stat st;

        if (fstat(f->real_fd, &st) < 0)
                return -1;
        if (S_ISBLK(st.st_mode)) {
                unsigned long long bytes;

                if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
                        return -1;
                f->max_blocks = bytes / BS;
                return 0;
        } else if (S_ISREG(st.st_mode)) {
                f->max_blocks = st.st_size / BS;
                return 0;
        }
        return -1;
}
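
/*
 * Drain completions from the CQ ring, sanity-checking each result, then
 * publish the new head so the kernel can reuse the entries.
 */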
static int reap_events(struct submitter *s)
{
        struct io_cq_ring *ring = &s->cq_ring;
        struct io_uring_cqe *cqe;
        unsigned head, reaped = 0;

        head = *ring->head;
        do {
                struct file *f;

                read_barrier();
                if (head == *ring->tail)
                        break;
                cqe = &ring->cqes[head & cq_ring_mask];
                if (!do_nop) {
                        f = (struct file *) (uintptr_t) cqe->user_data;
                        f->pending_ios--;
                        if (cqe->res != BS) {
                                printf("io: unexpected ret=%d\n", cqe->res);
                                return -1;
                        }
                }
                if (cqe->flags & IOCQE_FLAG_CACHEHIT)
                        s->cachehit++;
                else
                        s->cachemiss++;
                reaped++;
                head++;
        } while (1);

        s->inflight -= reaped;
        *ring->head = head;
        write_barrier();
        return reaped;
}
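
/*
 * Submitter thread: keeps up to DEPTH reads in flight, prepping and
 * submitting in BATCH_SUBMIT chunks and reaping as results complete.
 */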
static void *submitter_fn(void *data)
{
        struct submitter *s = data;
        struct io_sq_ring *ring = &s->sq_ring;
        int ret, prepped;

        printf("submitter=%d\n", gettid());

        srand48_r(pthread_self(), &s->rand);

        prepped = 0;
        do {
                int to_wait, to_submit, this_reap, to_prep;

                if (!prepped && s->inflight < DEPTH) {
                        to_prep = min(DEPTH - s->inflight, BATCH_SUBMIT);
                        prepped = prep_more_ios(s, to_prep);
                }
                s->inflight += prepped;
submit_more:
                to_submit = prepped;
submit:
                if (to_submit && (s->inflight + to_submit <= DEPTH))
                        to_wait = 0;
                else
                        to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);

                /*
                 * Only need to call io_uring_enter if we're not using SQ thread
                 * poll, or if IORING_SQ_NEED_WAKEUP is set.
                 */
                if (!sq_thread_poll || (*ring->flags & IORING_SQ_NEED_WAKEUP)) {
                        unsigned flags = 0;

                        if (to_wait)
                                flags = IORING_ENTER_GETEVENTS;
                        ret = io_uring_enter(s, to_submit, to_wait, flags);
                        s->calls++;
                }

                /*
                 * For non SQ thread poll, we already got the events we needed
                 * through the io_uring_enter() above. For SQ thread poll, we
                 * need to loop here until we find enough events.
                 */
                this_reap = 0;
                do {
                        int r;

                        r = reap_events(s);
                        if (r == -1) {
                                s->finish = 1;
                                break;
                        } else if (r > 0)
                                this_reap += r;
                } while (sq_thread_poll && this_reap < to_wait);
                s->reaps += this_reap;

                if (ret >= 0) {
                        if (!ret) {
                                to_submit = 0;
                                if (s->inflight)
                                        goto submit;
                                continue;
                        } else if (ret < to_submit) {
                                int diff = to_submit - ret;

                                s->done += ret;
                                prepped -= diff;
                                goto submit_more;
                        }
                        s->done += ret;
                        prepped = 0;
                        continue;
                } else if (ret < 0) {
                        if (errno == EAGAIN) {
                                if (s->finish)
                                        break;
                                if (this_reap)
                                        goto submit;
                                to_submit = 0;
                                goto submit;
                        }
                        printf("io_submit: %s\n", strerror(errno));
                        break;
                }
        } while (!s->finish);

        finish = 1;
        return NULL;
}

static void sig_int(int sig)
{
        printf("Exiting on signal %d\n", sig);
        submitters[0].finish = 1;
        finish = 1;
}

static void arm_sig_int(void)
{
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);
}
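
/*
 * Create the ring and mmap its three regions: the SQ ring (index array),
 * the SQE array, and the CQ ring.
 */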
static int setup_ring(struct submitter *s)
{
        struct io_sq_ring *sring = &s->sq_ring;
        struct io_cq_ring *cring = &s->cq_ring;
        struct io_uring_params p;
        int ret, fd;
        void *ptr;

        memset(&p, 0, sizeof(p));

        if (polled && !do_nop)
                p.flags |= IORING_SETUP_IOPOLL;
        if (sq_thread_poll) {
                p.flags |= IORING_SETUP_SQPOLL;
                if (sq_thread_cpu != -1) {
                        p.flags |= IORING_SETUP_SQ_AFF;
                        p.sq_thread_cpu = sq_thread_cpu;
                }
        }

        fd = io_uring_setup(DEPTH, &p);
        if (fd < 0) {
                perror("io_uring_setup");
                return 1;
        }
        s->ring_fd = fd;

        if (fixedbufs) {
                ret = io_uring_register_buffers(s);
                if (ret < 0) {
                        perror("io_uring_register_buffers");
                        return 1;
                }
        }

        if (register_files) {
                ret = io_uring_register_files(s);
                if (ret < 0) {
                        perror("io_uring_register_files");
                        return 1;
                }
        }

        ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                        IORING_OFF_SQ_RING);
        printf("sq_ring ptr = %p\n", ptr);
        sring->head = ptr + p.sq_off.head;
        sring->tail = ptr + p.sq_off.tail;
        sring->ring_mask = ptr + p.sq_off.ring_mask;
        sring->ring_entries = ptr + p.sq_off.ring_entries;
        sring->flags = ptr + p.sq_off.flags;
        sring->array = ptr + p.sq_off.array;
        sq_ring_mask = *sring->ring_mask;

        s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                        IORING_OFF_SQES);
        printf("sqes ptr    = %p\n", s->sqes);

        ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                        IORING_OFF_CQ_RING);
        printf("cq_ring ptr = %p\n", ptr);
        cring->head = ptr + p.cq_off.head;
        cring->tail = ptr + p.cq_off.tail;
        cring->ring_mask = ptr + p.cq_off.ring_mask;
        cring->ring_entries = ptr + p.cq_off.ring_entries;
        cring->cqes = ptr + p.cq_off.cqes;
        cq_ring_mask = *cring->ring_mask;
        return 0;
}
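
/* format the per-file pending IO counts as a comma-separated list */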
static void file_depths(char *buf)
{
        struct submitter *s = &submitters[0];
        char *p;
        int i;

        buf[0] = '\0';
        p = buf;
        for (i = 0; i < s->nr_files; i++) {
                struct file *f = &s->files[i];

                if (i + 1 == s->nr_files)
                        p += sprintf(p, "%d", f->pending_ios);
                else
                        p += sprintf(p, "%d, ", f->pending_ios);
        }
}
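
/*
 * Usage sketch (binary name assumed; builds as t/io_uring in the fio tree,
 * and O_NOATIME wants -D_GNU_SOURCE):
 *
 *   ./io_uring /dev/nvme0n1
 */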
int main(int argc, char *argv[])
{
        struct submitter *s = &submitters[0];
        unsigned long done, calls, reap, cache_hit, cache_miss;
        int err, i, flags, fd;
        char *fdepths;
        void *ret;

        if (!do_nop && argc < 2) {
                printf("%s: filename\n", argv[0]);
                return 1;
        }

        flags = O_RDONLY | O_NOATIME;
        if (!buffered)
                flags |= O_DIRECT;

        i = 1;
        while (!do_nop && i < argc) {
                struct file *f = &s->files[s->nr_files];

                fd = open(argv[i], flags);
                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                f->real_fd = fd;
                if (get_file_size(f)) {
                        printf("failed getting size of device/file\n");
                        return 1;
                }
                if (f->max_blocks <= 1) {
                        printf("Zero file/device size?\n");
                        return 1;
                }
                f->max_blocks--;

                printf("Added file %s\n", argv[i]);
                s->nr_files++;
                i++;
        }

        if (fixedbufs) {
                struct rlimit rlim;

                rlim.rlim_cur = RLIM_INFINITY;
                rlim.rlim_max = RLIM_INFINITY;
                if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
                        perror("setrlimit");
                        return 1;
                }
        }

        arm_sig_int();

        for (i = 0; i < DEPTH; i++) {
                void *buf;

                if (posix_memalign(&buf, BS, BS)) {
                        printf("failed alloc\n");
                        return 1;
                }
                s->iovecs[i].iov_base = buf;
                s->iovecs[i].iov_len = BS;
        }

        err = setup_ring(s);
        if (err) {
                printf("ring setup failed: %s, %d\n", strerror(errno), err);
                return 1;
        }
        printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
        printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries,
                        *s->cq_ring.ring_entries);

        pthread_create(&s->thread, NULL, submitter_fn, s);

        fdepths = malloc(8 * s->nr_files);
        cache_hit = cache_miss = reap = calls = done = 0;
        do {
                unsigned long this_done = 0;
                unsigned long this_reap = 0;
                unsigned long this_call = 0;
                unsigned long this_cache_hit = 0;
                unsigned long this_cache_miss = 0;
                unsigned long rpc = 0, ipc = 0;
                double hit = 0.0;

                sleep(1);
                this_done += s->done;
                this_call += s->calls;
                this_reap += s->reaps;
                this_cache_hit += s->cachehit;
                this_cache_miss += s->cachemiss;
                if (this_cache_hit && this_cache_miss) {
                        unsigned long hits, total;

                        hits = this_cache_hit - cache_hit;
                        total = hits + this_cache_miss - cache_miss;
                        hit = (double) hits / (double) total;
                        hit *= 100.0;
                }
                if (this_call - calls) {
                        rpc = (this_done - done) / (this_call - calls);
                        ipc = (this_reap - reap) / (this_call - calls);
                } else
                        rpc = ipc = -1;
                file_depths(fdepths);
                printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s), Cachehit=%0.2f%%\n",
                                this_done - done, rpc, ipc, s->inflight,
                                fdepths, hit);
                done = this_done;
                calls = this_call;
                reap = this_reap;
                cache_hit = s->cachehit;
                cache_miss = s->cachemiss;
        } while (!finish);

        pthread_join(s->thread, &ret);
        close(s->ring_fd);
        free(fdepths);
        return 0;
}