#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>

#include "../arch/arch.h"
#include "../lib/types.h"
#include "../os/io_uring.h"

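/*
 * barrier() is a compiler-only fence: it keeps the compiler from reordering
 * loads and stores across it. The SQ/CQ ring head and tail words are shared
 * with the kernel, so ring updates below are bracketed with it.
 */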
#define barrier() __asm__ __volatile__("": : :"memory")

#define min(a, b) (((a) < (b)) ? (a) : (b))
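/*
 * Selected fields follow: the SQ and CQ ring structures mirror the kernel's
 * mmap'ed layout (head/tail/mask/entries pointers, the SQ index array, the
 * CQE array), struct file tracks the target size in BS-sized blocks and the
 * IOs pending against it, and struct submitter bundles the rings, SQEs, IO
 * buffers and statistics for one submitter thread.
 */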
	unsigned *ring_entries;

	unsigned *ring_entries;
	struct io_uring_cqe *cqes;

#define BATCH_SUBMIT 64
#define BATCH_COMPLETE 64

static unsigned sq_ring_mask, cq_ring_mask;

	unsigned long max_blocks;

	struct drand48_data rand;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec iovecs[DEPTH];
	struct io_cq_ring cq_ring;

	unsigned long cachehit, cachemiss;

	struct file files[MAX_FDS];

static struct submitter submitters[1];
static volatile int finish;

static int polled = 1;		/* use IO polling */
static int fixedbufs = 1;	/* use fixed user buffers */
static int buffered = 0;	/* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;	/* use kernel submission/poller thread */
static int sq_thread_cpu = -1;	/* pin above thread to this CPU */
static int do_nop = 0;		/* no-op SQ ring commands */

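/*
 * Register the preallocated IO buffers (s->iovecs) with the ring so that
 * IORING_OP_READ_FIXED can reuse them without remapping or pinning pages
 * on every IO.
 */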
static int io_uring_register_buffers(struct submitter *s)

	struct io_uring_register_buffers reg = {

	return syscall(__NR_sys_io_uring_register, s->ring_fd,
			IORING_REGISTER_BUFFERS, &reg);

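/*
 * Register the open file descriptors with the ring; SQEs then refer to a
 * file by its index (fixed_fd) together with IOSQE_FIXED_FILE instead of
 * passing the raw descriptor on every submission.
 */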
static int io_uring_register_files(struct submitter *s)

	struct io_uring_register_files reg;

	s->fds = calloc(s->nr_files, sizeof(__s32));
	for (i = 0; i < s->nr_files; i++) {
		s->fds[i] = s->files[i].real_fd;
		s->files[i].fixed_fd = i;

	reg.nr_fds = s->nr_files;

	return syscall(__NR_sys_io_uring_register, s->ring_fd,
			IORING_REGISTER_FILES, &reg);

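/*
 * Thin syscall wrappers: io_uring is new enough that libc has no wrappers
 * for it, so setup/enter/register go through syscall(), with the
 * __NR_sys_io_uring_* numbers presumably supplied by the included arch/os
 * headers.
 */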
static int io_uring_setup(unsigned entries, struct io_uring_params *p)

	return syscall(__NR_sys_io_uring_setup, entries, p);

static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)

	return syscall(__NR_sys_io_uring_enter, s->ring_fd, to_submit,
			min_complete, flags);

static int gettid(void)

	return syscall(__NR_gettid);

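/*
 * Per-file share of the total queue depth; used to spread inflight IOs
 * roughly evenly across all files.
 */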
static unsigned file_depth(struct submitter *s)

	return (DEPTH + s->nr_files - 1) / s->nr_files;

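/*
 * Prepare one SQE: pick the next file that still has queue depth to spare,
 * choose a random BS-aligned offset within it, and set the SQE up either as
 * a fixed-buffer read (READ_FIXED) or a plain READV depending on fixedbufs,
 * or as a NOP when do_nop is set.
 */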
static void init_io(struct submitter *s, unsigned index)

	struct io_uring_sqe *sqe = &s->sqes[index];
	unsigned long offset;

		sqe->opcode = IORING_OP_NOP;

	if (s->nr_files == 1) {

		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {

			if (s->cur_file == s->nr_files)

	lrand48_r(&s->rand, &r);
	offset = (r % (f->max_blocks - 1)) * BS;

	sqe->flags = IOSQE_FIXED_FILE;

		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = s->iovecs[index].iov_base;

		sqe->buf_index = index;

		sqe->opcode = IORING_OP_READV;
		sqe->addr = &s->iovecs[index];

	sqe->fd = f->fixed_fd;

	sqe->user_data = (unsigned long) f;

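/*
 * Queue up to max_ios new SQEs at the SQ tail. The tail store is separated
 * from the SQE and array writes by barrier(), so the kernel never observes
 * a new tail before the entries it points at have been filled in.
 */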
static int prep_more_ios(struct submitter *s, int max_ios)

	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;

		if (next_tail == *ring->head)

		index = tail & sq_ring_mask;

		ring->array[index] = index;

	} while (prepped < max_ios);

	if (*ring->tail != tail) {
		/* order tail store with writes to sqes above */

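/*
 * Determine the target size: block devices are queried with the BLKGETSIZE64
 * ioctl, regular files use st_size; the result is stored in BS-sized blocks
 * for random offset generation.
 */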
static int get_file_size(struct file *f)

	if (fstat(f->real_fd, &st) < 0)

	if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)

		f->max_blocks = bytes / BS;

	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / BS;

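/*
 * Drain the CQ ring: walk entries from head to tail, credit each completion
 * to the owning file (user_data carries the struct file pointer), check that
 * the full BS bytes were transferred, and update the cache hit/miss counters
 * from the CQE flags.
 */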
static int reap_events(struct submitter *s)

	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;

		if (head == *ring->tail)

		cqe = &ring->cqes[head & cq_ring_mask];

		f = (struct file *) cqe->user_data;

		if (cqe->res != BS) {
			printf("io: unexpected ret=%d\n", cqe->res);

		if (cqe->flags & IOCQE_FLAG_CACHEHIT)

	s->inflight -= reaped;

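/*
 * Submitter thread: keeps up to DEPTH IOs inflight, preparing and submitting
 * them in batches of BATCH_SUBMIT and reaping completions in batches of
 * BATCH_COMPLETE, until finish is set (e.g. by the SIGINT handler).
 */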
static void *submitter_fn(void *data)

	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;

	printf("submitter=%d\n", gettid());

	srand48_r(pthread_self(), &s->rand);

		int to_wait, to_submit, this_reap, to_prep;

		if (!prepped && s->inflight < DEPTH) {
			to_prep = min(DEPTH - s->inflight, BATCH_SUBMIT);
			prepped = prep_more_ios(s, to_prep);

		s->inflight += prepped;

		if (s->inflight + BATCH_SUBMIT < DEPTH)

			to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		if (!sq_thread_poll || (*ring->flags & IORING_SQ_NEED_WAKEUP)) {

				flags = IORING_ENTER_GETEVENTS;
			ret = io_uring_enter(s, to_submit, to_wait, flags);

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */

		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

			} else if (ret < to_submit) {
				int diff = to_submit - ret;

		} else if (ret < 0) {
			if (errno == EAGAIN) {

			printf("io_submit: %s\n", strerror(errno));

	} while (!s->finish);

static void sig_int(int sig)

	printf("Exiting on signal %d\n", sig);
	submitters[0].finish = 1;

static void arm_sig_int(void)

	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);

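/*
 * Create the ring with io_uring_setup() (optionally with IOPOLL and/or
 * SQPOLL), register buffers and files as requested, then mmap() the SQ ring,
 * the SQE array and the CQ ring using the offsets the kernel returned in
 * struct io_uring_params.
 */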
static int setup_ring(struct submitter *s)

	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));

		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;

	fd = io_uring_setup(DEPTH, &p);

		perror("io_uring_setup");

		ret = io_uring_register_buffers(s);

			perror("io_uring_register_buffers");

	ret = io_uring_register_files(s);

		perror("io_uring_register_files");

	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQ_RING);
	printf("sq_ring ptr = %p\n", ptr);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);
	printf("sqes ptr = %p\n", s->sqes);

	ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	printf("cq_ring ptr = %p\n", ptr);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;

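/*
 * main(): open the files or devices named on the command line (invoked as,
 * e.g., "./io_uring /dev/nvme0n1", device path purely illustrative), raise
 * RLIMIT_MEMLOCK so fixed buffers can be registered, allocate one aligned
 * BS-sized buffer per queue slot, set up the ring, start the submitter
 * thread, and print IOPS and cache-hit statistics periodically until
 * interrupted.
 */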
int main(int argc, char *argv[])

	struct submitter *s = &submitters[0];
	unsigned long done, calls, reap, cache_hit, cache_miss;
	int err, i, flags, fd;

	if (!do_nop && argc < 2) {
		printf("%s: filename\n", argv[0]);

	flags = O_RDONLY | O_NOATIME;

	while (!do_nop && i < argc) {
		struct file *f = &s->files[s->nr_files];

		fd = open(argv[i], flags);

		if (get_file_size(f)) {
			printf("failed getting size of device/file\n");

		if (f->max_blocks <= 1) {
			printf("Zero file/device size?\n");

		printf("Added file %s\n", argv[i]);

	rlim.rlim_cur = RLIM_INFINITY;
	rlim.rlim_max = RLIM_INFINITY;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {

	for (i = 0; i < DEPTH; i++) {

		if (posix_memalign(&buf, BS, BS)) {
			printf("failed alloc\n");

		s->iovecs[i].iov_base = buf;
		s->iovecs[i].iov_len = BS;

		printf("ring setup failed: %s, %d\n", strerror(errno), err);

	printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
	printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);

	pthread_create(&s->thread, NULL, submitter_fn, s);

	cache_hit = cache_miss = reap = calls = done = 0;

		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long this_cache_hit = 0;
		unsigned long this_cache_miss = 0;
		unsigned long rpc = 0, ipc = 0;

		this_done += s->done;
		this_call += s->calls;
		this_reap += s->reaps;
		this_cache_hit += s->cachehit;
		this_cache_miss += s->cachemiss;
		if (this_cache_hit && this_cache_miss) {
			unsigned long hits, total;

			hits = this_cache_hit - cache_hit;
			total = hits + this_cache_miss - cache_miss;
			hit = (double) hits / (double) total;

		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);

		printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (head=%u tail=%u), Cachehit=%0.2f%%\n",
			this_done - done, rpc, ipc, s->inflight,
			*s->cq_ring.head, *s->cq_ring.tail, hit);

		cache_hit = s->cachehit;
		cache_miss = s->cachemiss;

	pthread_join(s->thread, &ret);