#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>

#include "../arch/arch.h"
#include "../lib/types.h"
#include "../lib/roundup.h"
#include "../lib/rand.h"
#include "../minmax.h"
#include "../os/linux/io_uring.h"
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

#define DEPTH			128
#define BATCH_SUBMIT		32
#define BATCH_COMPLETE		32
#define BS			4096
#define MAX_FDS			16
static unsigned sq_ring_mask, cq_ring_mask;

struct file {
	unsigned long max_blocks;
	unsigned long max_size;
	unsigned long cur_off;
	unsigned pending_ios;
	int real_fd;
	int fixed_fd;
	int fileno;
};

#define PLAT_BITS		6
#define PLAT_VAL		(1 << PLAT_BITS)
#define PLAT_GROUP_NR		29
#define PLAT_NR			(PLAT_GROUP_NR * PLAT_VAL)
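
/*
 * Completion latencies are accumulated into a fixed-size logarithmic
 * histogram rather than stored per IO: each power-of-two range of the
 * cycle count is split into PLAT_VAL (64) linear buckets, for
 * PLAT_GROUP_NR * PLAT_VAL buckets total and a bounded ~1/64 (<2%)
 * relative error per sample. This is the same plat histogram scheme
 * fio proper uses in stat.c.
 */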
struct submitter {
	pthread_t thread;
	int ring_fd;
	int enter_ring_fd;
	int index;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct io_cq_ring cq_ring;
	int inflight;
	int tid;
	unsigned long reaps;
	unsigned long done;
	unsigned long calls;
	volatile int finish;

	__s32 *fds;

	struct taus258_state rand_state;

	unsigned long *clock_batch;
	int clock_index;
	unsigned long *plat;

#ifdef CONFIG_LIBAIO
	io_context_t aio_ctx;
#endif

	int nr_files;
	int cur_file;
	struct file files[MAX_FDS];

	struct iovec iovecs[];
};
static struct submitter *submitter;
static volatile int finish;
static int stats_running;
static unsigned long max_iops;
static long page_size;

static int depth = DEPTH;
static int batch_submit = BATCH_SUBMIT;
static int batch_complete = BATCH_COMPLETE;
static int bs = BS;
static int polled = 1;		/* use IO polling */
static int fixedbufs = 1;	/* use fixed user buffers */
static int dma_map;		/* pre-map DMA buffers */
static int register_files = 1;	/* use fixed files */
static int buffered = 0;	/* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;	/* use kernel submission/poller thread */
static int sq_thread_cpu = -1;	/* pin above thread to this CPU */
static int do_nop = 0;		/* no-op SQ ring commands */
static int nthreads = 1;
static int stats = 0;		/* generate IO stats */
static int aio = 0;		/* use libaio */
static int runtime = 0;		/* runtime in seconds, 0 == run until interrupted */
static int random_io = 1;	/* random or sequential IO */
static int register_ring = 1;	/* register ring */
static int use_sync = 0;	/* use preadv2 */

static unsigned long tsc_rate;

#define TSC_RATE_FILE	"tsc-rate"

static int vectored = 1;

static float plist[] = { 1.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0,
			80.0, 90.0, 95.0, 99.0, 99.5, 99.9, 99.95, 99.99 };
static int plist_len = 17;
#ifndef IORING_REGISTER_MAP_BUFFERS
#define IORING_REGISTER_MAP_BUFFERS	22
struct io_uring_map_buffers {
	__s32	fd;
	__u32	buf_end;
	__u32	buf_start;
	__u32	flags;
	__u64	rsvd[2];
};
#endif
static unsigned long cycles_to_nsec(unsigned long cycles)
{
	unsigned long val;

	if (!tsc_rate)
		return cycles;

	val = cycles * 1000000000ULL;
	return val / tsc_rate;
}
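
/*
 * Worked example: with tsc_rate = 3000000000 (a 3 GHz TSC), 3000 cycles
 * scale to 3000 * 1e9 / 3e9 = 1000 nsec. Without a known rate the raw
 * tick count is returned, and the percentile report below labels its
 * values as "tsc ticks" instead of nsec.
 */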
static unsigned long plat_idx_to_val(unsigned int idx)
{
	unsigned int error_bits;
	unsigned long k, base;

	assert(idx < PLAT_NR);

	/*
	 * MSB <= (PLAT_BITS-1), cannot be rounded off. Use
	 * all bits of the sample as index.
	 */
	if (idx < (PLAT_VAL << 1))
		return cycles_to_nsec(idx);

	/* Find the group and compute the minimum value of that group */
	error_bits = (idx >> PLAT_BITS) - 1;
	base = ((unsigned long) 1) << (error_bits + PLAT_BITS);

	/* Find its bucket number within the group */
	k = idx % PLAT_VAL;

	/* Return the mean of the range of the bucket */
	return cycles_to_nsec(base + ((k + 0.5) * (1 << error_bits)));
}
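
/*
 * Worked example of the bucket math (PLAT_BITS = 6, PLAT_VAL = 64):
 * idx = 200 is past the two linear groups (128), so error_bits =
 * (200 >> 6) - 1 = 2 and the group starts at base = 1 << 8 = 256;
 * bucket k = 200 % 64 = 8, giving the midpoint 256 + 8.5 * 4 = 290
 * cycles, which is then converted to nsec. plat_val_to_idx() below is
 * the inverse mapping: it quantizes a sample by discarding error_bits
 * low-order bits, and plat_val_to_idx(290) indeed yields 200.
 */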
unsigned int calc_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
				   unsigned long **output,
				   unsigned long *maxv, unsigned long *minv)
{
	unsigned long sum = 0;
	unsigned int len = plist_len, i, j = 0;
	unsigned long *ovals = NULL;
	bool is_last;

	*minv = -1UL;
	*maxv = 0;

	ovals = malloc(len * sizeof(*ovals));
	if (!ovals)
		return 0;

	/*
	 * Calculate bucket values, note down max and min values
	 */
	is_last = false;
	for (i = 0; i < PLAT_NR && !is_last; i++) {
		sum += io_u_plat[i];
		while (sum >= ((long double) plist[j] / 100.0 * nr)) {
			assert(plist[j] <= 100.0);

			ovals[j] = plat_idx_to_val(i);
			if (ovals[j] < *minv)
				*minv = ovals[j];
			if (ovals[j] > *maxv)
				*maxv = ovals[j];

			is_last = (j == len - 1) != 0;
			if (is_last)
				break;

			j++;
		}
	}

	if (!is_last)
		fprintf(stderr, "error calculating latency percentiles\n");

	*output = ovals;
	return len;
}
static void show_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
				  unsigned int precision)
{
	unsigned int divisor, len, i, j = 0;
	unsigned long minv, maxv;
	unsigned long *ovals;
	int per_line, scale_down, time_width;
	bool is_last;
	char fmt[32];

	len = calc_clat_percentiles(io_u_plat, nr, &ovals, &maxv, &minv);
	if (!len || !ovals)
		goto out;

	if (!tsc_rate) {
		scale_down = 0;
		divisor = 1;
		printf("    percentiles (tsc ticks):\n    |");
	} else if (minv > 2000 && maxv > 99999) {
		scale_down = 1;
		divisor = 1000;
		printf("    percentiles (usec):\n    |");
	} else {
		scale_down = 0;
		divisor = 1;
		printf("    percentiles (nsec):\n    |");
	}

	time_width = max(5, (int) (log10(maxv / divisor) + 1));
	snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
			precision, time_width);
	/* fmt will be something like " %5.2fth=[%4llu]%c" */
	per_line = (80 - 7) / (precision + 10 + time_width);

	for (j = 0; j < len; j++) {
		/* for formatting */
		if (j != 0 && (j % per_line) == 0)
			printf("    |");

		/* end of the list */
		is_last = (j == len - 1) != 0;

		for (i = 0; i < scale_down; i++)
			ovals[j] = (ovals[j] + 999) / 1000;

		printf(fmt, plist[j], ovals[j], is_last ? '\n' : ',');

		if (is_last)
			break;

		if ((j % per_line) == per_line - 1)	/* for formatting */
			printf("\n");
	}

out:
	free(ovals);
}
#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned int plat_val_to_idx(unsigned long val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Find MSB starting from bit 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val)*8) - __builtin_clzll(val) - 1;

	/*
	 * MSB <= (PLAT_BITS-1), cannot be rounded off. Use
	 * all bits of the sample as index.
	 */
	if (msb <= PLAT_BITS)
		return val;

	/* Compute the number of error bits to discard */
	error_bits = msb - PLAT_BITS;

	/* Compute the number of buckets before the group */
	base = (error_bits + 1) << PLAT_BITS;

	/*
	 * Discard the error bits and apply the mask to find the
	 * index for the buckets in the group.
	 */
	offset = (PLAT_VAL - 1) & (val >> error_bits);

	/* Make sure the index does not exceed (array size - 1) */
	idx = (base + offset) < (PLAT_NR - 1) ?
		(base + offset) : (PLAT_NR - 1);

	return idx;
}
#endif
static void add_stat(struct submitter *s, int clock_index, int nr)
{
#ifdef ARCH_HAVE_CPU_CLOCK
	unsigned long cycles;
	unsigned int pidx;

	if (!s->finish && clock_index) {
		cycles = get_cpu_clock();
		cycles -= s->clock_batch[clock_index];
		pidx = plat_val_to_idx(cycles);
		s->plat[pidx] += nr;
	}
#endif
}
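
/*
 * Note the granularity: one TSC timestamp is taken per submission batch
 * (stored in clock_batch[] and referenced by index), and all nr
 * completions attributed to that batch share the same start time. The
 * percentiles are therefore batch-resolution approximations, not exact
 * per-IO latencies; that is the price of not timestamping every SQE.
 */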
static int io_uring_map_buffers(struct submitter *s)
{
	struct io_uring_map_buffers map = {
		.fd = s->files[0].real_fd,
	};

	if (s->nr_files > 1)
		fprintf(stdout, "Mapping buffers may not work with multiple files\n");

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_MAP_BUFFERS, &map, 1);
}
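
/*
 * IORING_REGISTER_MAP_BUFFERS (opcode 22 above) is, to our knowledge,
 * an experimental opcode that never landed in mainline Linux (mainline
 * reuses 22 for IORING_REGISTER_PBUF_RING), so on stock kernels this
 * call is expected to fail and -D/dma_map is best left disabled.
 */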
static int io_uring_register_buffers(struct submitter *s)
{
	if (do_nop)
		return 0;

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_BUFFERS, s->iovecs, roundup_pow2(depth));
}

static int io_uring_register_files(struct submitter *s)
{
	int i;

	if (do_nop)
		return 0;

	s->fds = calloc(s->nr_files, sizeof(__s32));
	for (i = 0; i < s->nr_files; i++) {
		s->fds[i] = s->files[i].real_fd;
		s->files[i].fixed_fd = i;
	}

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_FILES, s->fds, s->nr_files);
}
static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
	/*
	 * Clamp CQ ring size at our SQ ring size; we don't need more entries
	 * than that.
	 */
	p->flags |= IORING_SETUP_CQSIZE;
	p->cq_entries = entries;

	return syscall(__NR_io_uring_setup, entries, p);
}
static void io_uring_probe(int fd)
{
	struct io_uring_probe *p;
	int ret;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_READ > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
		vectored = 0;
out:
	free(p);
}
static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	if (register_ring)
		flags |= IORING_ENTER_REGISTERED_RING;
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
				min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#endif
}
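
/*
 * With IORING_ENTER_REGISTERED_RING set, the fd argument is an offset
 * into the task's registered-ring table (populated by
 * io_uring_register_ring() below) instead of a regular file descriptor,
 * which lets the kernel skip the per-call fdget/fdput overhead.
 */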
#ifndef CONFIG_HAVE_GETTID
static int gettid(void)
{
	return syscall(__NR_gettid);
}
#endif

static unsigned file_depth(struct submitter *s)
{
	return (depth + s->nr_files - 1) / s->nr_files;
}
static void init_io(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index];
	unsigned long offset;
	struct file *f;
	long r;

	if (do_nop) {
		sqe->opcode = IORING_OP_NOP;
		return;
	}

	if (s->nr_files == 1) {
		f = &s->files[0];
	} else {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			s->cur_file++;
			if (s->cur_file == s->nr_files)
				s->cur_file = 0;
			f = &s->files[s->cur_file];
		}
	}
	f->pending_ios++;

	if (random_io) {
		r = __rand64(&s->rand_state);
		offset = (r % (f->max_blocks - 1)) * bs;
	} else {
		offset = f->cur_off;
		f->cur_off += bs;
		if (f->cur_off + bs > f->max_size)
			f->cur_off = 0;
	}

	if (register_files) {
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->fixed_fd;
	} else {
		sqe->flags = 0;
		sqe->fd = f->real_fd;
	}
	if (fixedbufs) {
		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = index;
	} else if (!vectored) {
		sqe->opcode = IORING_OP_READ;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
	} else {
		sqe->opcode = IORING_OP_READV;
		sqe->addr = (unsigned long) &s->iovecs[index];
		sqe->len = 1;
	}
	sqe->ioprio = 0;
	sqe->off = offset;
	sqe->user_data = (unsigned long) f->fileno;
	if (stats && stats_running)
		sqe->user_data |= ((uint64_t)s->clock_index << 32);
}
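
/*
 * user_data packs two values per SQE: the low 32 bits carry the file
 * index so a completion can be matched back to its struct file, and,
 * while stats are running, the high 32 bits carry the clock_batch slot
 * that timestamped the batch this IO was submitted in. The reap paths
 * unpack both halves.
 */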
static int prep_more_ios_uring(struct submitter *s, int max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		if (next_tail == atomic_load_acquire(ring->head))
			break;

		index = tail & sq_ring_mask;
		init_io(s, index);
		ring->array[index] = index;
		tail = next_tail;

		prepped++;
	} while (prepped < max_ios);

	if (prepped)
		atomic_store_release(ring->tail, tail);
	return prepped;
}
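
/*
 * The release store to the tail is what publishes the new SQEs: it
 * orders all the init_io() stores above it, so the kernel side (or the
 * SQPOLL thread), which reads the tail with acquire semantics, is
 * guaranteed to observe fully initialized SQEs and array slots.
 */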
static int get_file_size(struct file *f)
{
	struct stat st;

	if (fstat(f->real_fd, &st) < 0)
		return -1;
	if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
			return -1;

		f->max_blocks = bytes / bs;
		f->max_size = bytes;
		return 0;
	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / bs;
		f->max_size = st.st_size;
		return 0;
	}

	return -1;
}
static int reap_events_uring(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;
	int last_idx = -1, stat_nr = 0;

	head = *ring->head;
	do {
		struct file *f;

		if (head == atomic_load_acquire(ring->tail))
			break;
		cqe = &ring->cqes[head & cq_ring_mask];
		if (!do_nop) {
			int fileno = cqe->user_data & 0xffffffff;

			f = &s->files[fileno];
			f->pending_ios--;
			if (cqe->res != bs) {
				printf("io: unexpected ret=%d\n", cqe->res);
				if (polled && cqe->res == -EOPNOTSUPP)
					printf("Your filesystem/driver/kernel doesn't support polled IO\n");
				return -1;
			}
		}
		if (stats) {
			int clock_index = cqe->user_data >> 32;

			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
					stat_nr = 0;
				}
				last_idx = clock_index;
			}
			stat_nr++;
		}
		reaped++;
		head++;
	} while (1);

	if (stat_nr)
		add_stat(s, last_idx, stat_nr);

	if (reaped) {
		s->inflight -= reaped;
		atomic_store_release(ring->head, head);
	}
	return reaped;
}
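
/*
 * Mirror of the submit side: the acquire load of the CQ tail pairs with
 * the kernel's release store when it posts completions, and the release
 * store of the head above hands the consumed CQE slots back to the
 * kernel for reuse.
 */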
static int submitter_init(struct submitter *s)
{
	int i, nr_batch;

	s->tid = gettid();
	printf("submitter=%d, tid=%d\n", s->index, s->tid);

	__init_rand64(&s->rand_state, pthread_self());
	srand48(pthread_self());

	for (i = 0; i < MAX_FDS; i++)
		s->files[i].fileno = i;

	if (stats) {
		nr_batch = roundup_pow2(depth / batch_submit);
		if (nr_batch < 2)
			nr_batch = 2;
		s->clock_batch = calloc(nr_batch, sizeof(unsigned long));
		s->clock_index = 1;

		s->plat = calloc(PLAT_NR, sizeof(unsigned long));
	} else {
		s->clock_batch = NULL;
		s->plat = NULL;
		nr_batch = 0;
	}

	return nr_batch;
}
#ifdef CONFIG_LIBAIO
static int prep_more_ios_aio(struct submitter *s, int max_ios, struct iocb *iocbs)
{
	uint64_t data;
	struct file *f;
	unsigned index;
	long long offset;
	long r;

	index = 0;
	while (index < max_ios) {
		struct iocb *iocb = &iocbs[index];

		if (s->nr_files == 1) {
			f = &s->files[0];
		} else {
			f = &s->files[s->cur_file];
			if (f->pending_ios >= file_depth(s)) {
				s->cur_file++;
				if (s->cur_file == s->nr_files)
					s->cur_file = 0;
				f = &s->files[s->cur_file];
			}
		}
		f->pending_ios++;

		r = lrand48();
		offset = (r % (f->max_blocks - 1)) * bs;
		io_prep_pread(iocb, f->real_fd, s->iovecs[index].iov_base,
				s->iovecs[index].iov_len, offset);

		data = f->fileno;
		if (stats && stats_running)
			data |= (((uint64_t) s->clock_index) << 32);
		iocb->data = (void *) (uintptr_t) data;
		index++;
	}
	return index;
}
static int reap_events_aio(struct submitter *s, struct io_event *events, int evs)
{
	int last_idx = -1, stat_nr = 0;
	int reaped = 0;

	while (evs) {
		uint64_t data = (uintptr_t) events[reaped].data;
		struct file *f = &s->files[data & 0xffffffff];

		f->pending_ios--;
		if (events[reaped].res != bs) {
			printf("io: unexpected ret=%ld\n", events[reaped].res);
			return -1;
		}
		if (stats) {
			int clock_index = data >> 32;

			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
					stat_nr = 0;
				}
				last_idx = clock_index;
			}
			stat_nr++;
		}
		reaped++;
		evs--;
	}

	if (stat_nr)
		add_stat(s, last_idx, stat_nr);

	s->inflight -= reaped;
	s->done += reaped;
	return reaped;
}
static void *submitter_aio_fn(void *data)
{
	struct submitter *s = data;
	int i, ret, prepped;
	struct iocb **iocbsptr;
	struct iocb *iocbs;
	struct io_event *events;
#ifdef ARCH_HAVE_CPU_CLOCK
	int nr_batch = submitter_init(s);
#else
	submitter_init(s);
#endif

	iocbsptr = calloc(depth, sizeof(struct iocb *));
	iocbs = calloc(depth, sizeof(struct iocb));
	events = calloc(depth, sizeof(struct io_event));

	for (i = 0; i < depth; i++)
		iocbsptr[i] = &iocbs[i];

	prepped = 0;
	do {
		int to_wait, to_submit, to_prep;

		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios_aio(s, to_prep, iocbs);
#ifdef ARCH_HAVE_CPU_CLOCK
			if (prepped && stats) {
				s->clock_batch[s->clock_index] = get_cpu_clock();
				s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
			}
#endif
		}
		s->inflight += prepped;
		to_submit = prepped;

		if (to_submit && (s->inflight + to_submit <= depth))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, batch_complete);

		ret = io_submit(s->aio_ctx, to_submit, iocbsptr);
		if (ret < 0) {
			perror("io_submit");
			break;
		} else if (ret != to_submit) {
			printf("submitted %d, wanted %d\n", ret, to_submit);
			break;
		}
		prepped = 0;

		while (to_wait) {
			int r;

			s->calls++;
			r = io_getevents(s->aio_ctx, to_wait, to_wait, events, NULL);
			if (r < 0) {
				perror("io_getevents");
				break;
			} else if (r != to_wait) {
				printf("r=%d, wait=%d\n", r, to_wait);
				break;
			}
			r = reap_events_aio(s, events, r);
			s->reaps += r;
			to_wait -= r;
		}
	} while (!s->finish);

	free(iocbsptr);
	free(iocbs);
	free(events);
	finish = 1;
	return NULL;
}
#endif /* CONFIG_LIBAIO */
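
/*
 * Contrast with submitter_uring_fn() below: the libaio path needs two
 * syscalls per cycle (io_submit() plus io_getevents()), while io_uring
 * can submit and wait in a single io_uring_enter() call, or in zero
 * calls when an SQPOLL thread is doing the submission. That difference
 * is much of what this tool exists to measure.
 */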
static void io_uring_unregister_ring(struct submitter *s)
{
	struct io_uring_rsrc_update up = {
		.offset = s->enter_ring_fd,
	};

	syscall(__NR_io_uring_register, s->ring_fd, IORING_UNREGISTER_RING_FDS,
		&up, 1);
}

static int io_uring_register_ring(struct submitter *s)
{
	struct io_uring_rsrc_update up = {
		.data = s->ring_fd,
		.offset = -1U,
	};
	int ret;

	ret = syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_RING_FDS, &up, 1);
	if (ret == 1) {
		s->enter_ring_fd = up.offset;
		return 0;
	}
	register_ring = 0;
	return -1;
}
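
/*
 * IORING_REGISTER_RING_FDS went into mainline in Linux 5.18; on older
 * kernels the register call fails, register_ring is cleared, and
 * io_uring_enter() transparently falls back to the real ring fd.
 */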
static void *submitter_uring_fn(void *data)
{
	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;
	int ret, prepped;
#ifdef ARCH_HAVE_CPU_CLOCK
	int nr_batch = submitter_init(s);
#else
	submitter_init(s);
#endif

	if (register_ring)
		io_uring_register_ring(s);

	prepped = 0;
	do {
		int to_wait, to_submit, this_reap, to_prep;
		unsigned ring_flags = 0;

		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios_uring(s, to_prep);
#ifdef ARCH_HAVE_CPU_CLOCK
			if (prepped && stats) {
				s->clock_batch[s->clock_index] = get_cpu_clock();
				s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
			}
#endif
		}
		s->inflight += prepped;
submit_more:
		to_submit = prepped;
submit:
		if (to_submit && (s->inflight + to_submit <= depth))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, batch_complete);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		if (sq_thread_poll)
			ring_flags = atomic_load_acquire(ring->flags);
		if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
			unsigned flags = 0;

			if (to_wait)
				flags = IORING_ENTER_GETEVENTS;
			if (ring_flags & IORING_SQ_NEED_WAKEUP)
				flags |= IORING_ENTER_SQ_WAKEUP;
			ret = io_uring_enter(s, to_submit, to_wait, flags);
			s->calls++;
		} else {
			/* for SQPOLL, we submitted it all effectively */
			ret = to_submit;
		}

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */
		this_reap = 0;
		do {
			int r;

			r = reap_events_uring(s);
			if (r == -1) {
				s->finish = 1;
				break;
			} else if (r > 0)
				this_reap += r;
		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

		if (ret >= 0) {
			if (!ret) {
				to_submit = 0;
				if (s->inflight)
					goto submit;
				continue;
			} else if (ret < to_submit) {
				int diff = to_submit - ret;

				s->done += ret;
				prepped -= diff;
				goto submit_more;
			}
			s->done += ret;
			prepped = 0;
			continue;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
				if (s->finish)
					break;
				if (this_reap)
					goto submit;
				to_submit = 0;
				goto submit;
			}
			printf("io_submit: %s\n", strerror(errno));
			break;
		}
	} while (!s->finish);

	if (register_ring)
		io_uring_unregister_ring(s);

	finish = 1;
	return NULL;
}
#ifdef CONFIG_PWRITEV2
static void *submitter_sync_fn(void *data)
{
	struct submitter *s = data;
	int ret;

	submitter_init(s);

	do {
		uint64_t offset;
		struct file *f;
		long r;

		if (s->nr_files == 1) {
			f = &s->files[0];
		} else {
			f = &s->files[s->cur_file];
			if (f->pending_ios >= file_depth(s)) {
				s->cur_file++;
				if (s->cur_file == s->nr_files)
					s->cur_file = 0;
				f = &s->files[s->cur_file];
			}
		}
		f->pending_ios++;

		if (random_io) {
			r = __rand64(&s->rand_state);
			offset = (r % (f->max_blocks - 1)) * bs;
		} else {
			offset = f->cur_off;
			f->cur_off += bs;
			if (f->cur_off + bs > f->max_size)
				f->cur_off = 0;
		}

#ifdef ARCH_HAVE_CPU_CLOCK
		if (stats)
			s->clock_batch[s->clock_index] = get_cpu_clock();
#endif

		s->inflight++;
		if (polled)
			ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, RWF_HIPRI);
		else
			ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, 0);

		if (ret < 0) {
			perror("preadv2");
			break;
		} else if (ret != bs) {
			break;
		}

		s->inflight--;
		f->pending_ios--;
		if (stats)
			add_stat(s, s->clock_index, 1);
	} while (!s->finish);

	finish = 1;
	return NULL;
}
#else
static void *submitter_sync_fn(void *data)
{
	finish = 1;
	return NULL;
}
#endif
static struct submitter *get_submitter(int offset)
{
	void *ret;

	ret = submitter;
	if (offset) {
		/* stride must match the per-submitter allocation in main() */
		ret += offset * (sizeof(*submitter) +
				roundup_pow2(depth) * sizeof(struct iovec));
	}
	return ret;
}

static void do_finish(const char *reason)
{
	int j;

	printf("Exiting on %s\n", reason);
	for (j = 0; j < nthreads; j++) {
		struct submitter *s = get_submitter(j);
		s->finish = 1;
	}
	if (max_iops > 100000)
		printf("Maximum IOPS=%luK\n", max_iops / 1000);
	else if (max_iops)
		printf("Maximum IOPS=%lu\n", max_iops);
	finish = 1;
}
static void sig_int(int sig)
{
	do_finish("signal");
}

static void arm_sig_int(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);

	/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
	sigaction(SIGBREAK, &act, NULL);
#endif
}
static int setup_aio(struct submitter *s)
{
#ifdef CONFIG_LIBAIO
	if (polled) {
		fprintf(stderr, "aio does not support polled IO\n");
		polled = 0;
	}
	if (sq_thread_poll) {
		fprintf(stderr, "aio does not support SQPOLL IO\n");
		sq_thread_poll = 0;
	}
	if (do_nop) {
		fprintf(stderr, "aio does not support no-op IO\n");
		do_nop = 0;
	}
	if (fixedbufs || register_files) {
		fprintf(stderr, "aio does not support registered files or buffers\n");
		fixedbufs = register_files = 0;
	}

	return io_queue_init(roundup_pow2(depth), &s->aio_ctx);
#else
	fprintf(stderr, "Legacy AIO not available on this system/build\n");
	exit(1);
#endif
}
static int setup_ring(struct submitter *s)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;
	int ret, fd;
	void *ptr;

	memset(&p, 0, sizeof(p));

	if (polled && !do_nop)
		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;
		}
	}

	fd = io_uring_setup(depth, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	s->ring_fd = s->enter_ring_fd = fd;

	io_uring_probe(fd);

	if (fixedbufs) {
		struct rlimit rlim;

		rlim.rlim_cur = RLIM_INFINITY;
		rlim.rlim_max = RLIM_INFINITY;
		/* ignore potential error, not needed on newer kernels */
		setrlimit(RLIMIT_MEMLOCK, &rlim);

		ret = io_uring_register_buffers(s);
		if (ret < 0) {
			perror("io_uring_register_buffers");
			return 1;
		}

		if (dma_map) {
			ret = io_uring_map_buffers(s);
			if (ret < 0) {
				perror("io_uring_map_buffers");
				return 1;
			}
		}
	}

	if (register_files) {
		ret = io_uring_register_files(s);
		if (ret < 0) {
			perror("io_uring_register_files");
			return 1;
		}
	}
	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQ_RING);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);

	ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;
	return 0;
}
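
/*
 * Three mappings above, all against the same ring fd at fixed magic
 * offsets: IORING_OFF_SQ_RING for the SQ ring metadata and index array,
 * IORING_OFF_SQES for the SQE array itself, and IORING_OFF_CQ_RING for
 * the CQ ring with its embedded CQE array. On kernels that advertise
 * IORING_FEAT_SINGLE_MMAP the SQ and CQ rings can share one mapping,
 * but mapping them separately like this works everywhere.
 */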
static void file_depths(char *buf)
{
	bool prev = false;
	char *p;
	int i, j;

	buf[0] = '\0';
	p = buf;
	for (j = 0; j < nthreads; j++) {
		struct submitter *s = get_submitter(j);

		for (i = 0; i < s->nr_files; i++) {
			struct file *f = &s->files[i];

			if (prev)
				p += sprintf(p, " %d", f->pending_ios);
			else
				p += sprintf(p, "%d", f->pending_ios);
			prev = true;
		}
	}
}
static void usage(char *argv, int status)
{
	char runtime_str[16];

	snprintf(runtime_str, sizeof(runtime_str), "%d", runtime);
	printf("%s [options] -- [filenames]\n"
		" -d <int> : IO Depth, default %d\n"
		" -s <int> : Batch submit, default %d\n"
		" -c <int> : Batch complete, default %d\n"
		" -b <int> : Block size, default %d\n"
		" -p <bool> : Polled IO, default %d\n"
		" -B <bool> : Fixed buffers, default %d\n"
		" -D <bool> : DMA map fixed buffers, default %d\n"
		" -F <bool> : Register files, default %d\n"
		" -n <int> : Number of threads, default %d\n"
		" -O <bool> : Use O_DIRECT, default %d\n"
		" -N <bool> : Perform just no-op requests, default %d\n"
		" -t <bool> : Track IO latencies, default %d\n"
		" -T <int> : TSC rate in HZ\n"
		" -r <int> : Runtime in seconds, default %s\n"
		" -R <bool> : Use random IO, default %d\n"
		" -a <bool> : Use legacy aio, default %d\n"
		" -S <bool> : Use sync IO (preadv2), default %d\n"
		" -X <bool> : Use registered ring, default %d\n",
		argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
		fixedbufs, dma_map, register_files, nthreads, !buffered, do_nop,
		stats, runtime == 0 ? "unlimited" : runtime_str, random_io, aio,
		use_sync, register_ring);
	exit(status);
}
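
/*
 * Example invocation (the device path is illustrative):
 *   t/io_uring -d128 -s32 -c32 -p1 -B1 -F1 /dev/nvme0n1
 * issues 4KiB random polled reads at QD 128 with fixed buffers and
 * registered files, batching submissions and completions 32 at a time.
 */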
static void read_tsc_rate(void)
{
	char buffer[32];
	int fd, ret;

	if (tsc_rate)
		return;

	fd = open(TSC_RATE_FILE, O_RDONLY);
	if (fd < 0)
		return;

	ret = read(fd, buffer, sizeof(buffer));
	if (ret < 0) {
		close(fd);
		return;
	}

	tsc_rate = strtoul(buffer, NULL, 10);
	printf("Using TSC rate %luHz\n", tsc_rate);
	close(fd);
}

static void write_tsc_rate(void)
{
	char buffer[32];
	struct stat sb;
	int fd, ret;

	if (!stat(TSC_RATE_FILE, &sb))
		return;

	fd = open(TSC_RATE_FILE, O_WRONLY | O_CREAT, 0644);
	if (fd < 0)
		return;

	memset(buffer, 0, sizeof(buffer));
	sprintf(buffer, "%lu", tsc_rate);
	ret = write(fd, buffer, strlen(buffer));
	if (ret < 0)
		perror("write");
	close(fd);
}
int main(int argc, char *argv[])
{
	struct submitter *s;
	unsigned long done, calls, reap;
	int err, i, j, flags, fd, opt, threads_per_f, threads_rem = 0, nfiles;
	struct file f;
	char *fdepths;
	void *ret;

	if (!do_nop && argc < 2)
		usage(argv[0], 1);

	while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:t:T:a:r:D:R:X:S:h?")) != -1) {
		switch (opt) {
		case 'a': aio = !!atoi(optarg); break;
		case 'd': depth = atoi(optarg); break;
		case 's':
			batch_submit = atoi(optarg);
			if (!batch_submit)
				batch_submit = 1;
			break;
		case 'c':
			batch_complete = atoi(optarg);
			if (!batch_complete)
				batch_complete = 1;
			break;
		case 'b': bs = atoi(optarg); break;
		case 'p': polled = !!atoi(optarg); break;
		case 'B': fixedbufs = !!atoi(optarg); break;
		case 'F': register_files = !!atoi(optarg); break;
		case 'n':
			nthreads = atoi(optarg);
			if (!nthreads) {
				printf("Threads must be non-zero\n");
				usage(argv[0], 1);
			}
			break;
		case 'N': do_nop = !!atoi(optarg); break;
		case 'O': buffered = !atoi(optarg); break;
		case 't':
#ifndef ARCH_HAVE_CPU_CLOCK
			fprintf(stderr, "Stats not supported on this CPU\n");
			return 1;
#endif
			stats = !!atoi(optarg);
			break;
		case 'T':
#ifndef ARCH_HAVE_CPU_CLOCK
			fprintf(stderr, "Stats not supported on this CPU\n");
			return 1;
#endif
			tsc_rate = strtoul(optarg, NULL, 10);
			write_tsc_rate();
			break;
		case 'r': runtime = atoi(optarg); break;
		case 'D': dma_map = !!atoi(optarg); break;
		case 'R': random_io = !!atoi(optarg); break;
		case 'X': register_ring = !!atoi(optarg); break;
		case 'S':
#ifdef CONFIG_PWRITEV2
			use_sync = !!atoi(optarg);
#else
			fprintf(stderr, "preadv2 not supported\n");
			exit(1);
#endif
			break;
		case 'h':
		case '?':
		default:
			usage(argv[0], 0);
			break;
		}
	}

	if (stats)
		read_tsc_rate();

	if (batch_complete > depth)
		batch_complete = depth;
	if (batch_submit > depth)
		batch_submit = depth;
	if (!fixedbufs && dma_map)
		dma_map = 0;

	submitter = calloc(nthreads, sizeof(*submitter) +
				roundup_pow2(depth) * sizeof(struct iovec));
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		s->index = j;
		s->done = s->calls = s->reaps = 0;
	}
	flags = O_RDONLY | O_NOATIME;
	if (!buffered)
		flags |= O_DIRECT;

	j = 0;
	i = optind;
	nfiles = argc - i;
	if (!do_nop) {
		if (!nfiles) {
			printf("No files specified\n");
			usage(argv[0], 1);
		}
		threads_per_f = nthreads / nfiles;
		/* make sure each thread gets assigned files */
		if (threads_per_f == 0) {
			threads_per_f = 1;
		} else {
			threads_rem = nthreads - threads_per_f * nfiles;
		}
	}
	while (!do_nop && i < argc) {
		int k, limit;

		memset(&f, 0, sizeof(f));

		fd = open(argv[i], flags);
		if (fd < 0) {
			perror("open");
			return -1;
		}
		f.real_fd = fd;
		if (get_file_size(&f)) {
			printf("failed getting size of device/file\n");
			return -1;
		}
		if (f.max_blocks <= 1) {
			printf("Zero file/device size?\n");
			return -1;
		}
		f.max_blocks--;

		limit = threads_per_f;
		limit += threads_rem > 0 ? 1 : 0;
		for (k = 0; k < limit; k++) {
			s = get_submitter((j + k) % nthreads);

			if (s->nr_files == MAX_FDS) {
				printf("Max number of files (%d) reached\n", MAX_FDS);
				break;
			}

			memcpy(&s->files[s->nr_files], &f, sizeof(f));

			printf("Added file %s (submitter %d)\n", argv[i], s->index);
			s->nr_files++;
		}
		threads_rem--;
		i++;
		j += limit;
	}

	arm_sig_int();
	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0)
		page_size = 4096;

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		for (i = 0; i < roundup_pow2(depth); i++) {
			void *buf;

			if (posix_memalign(&buf, page_size, bs)) {
				printf("failed alloc\n");
				return 1;
			}
			s->iovecs[i].iov_base = buf;
			s->iovecs[i].iov_len = bs;
		}
	}

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);

		if (use_sync)
			err = 0;
		else if (!aio)
			err = setup_ring(s);
		else
			err = setup_aio(s);
		if (err) {
			printf("ring setup failed: %s, %d\n", strerror(errno), err);
			return 1;
		}
	}
	s = get_submitter(0);
	printf("polled=%d, fixedbufs=%d/%d, register_files=%d, buffered=%d, QD=%d\n",
			polled, fixedbufs, dma_map, register_files, buffered, depth);
	if (use_sync)
		printf("Engine=preadv2\n");
	else if (!aio)
		printf("Engine=io_uring, sq_ring=%d, cq_ring=%d\n",
				*s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
	else
		printf("Engine=aio\n");
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		if (use_sync)
			pthread_create(&s->thread, NULL, submitter_sync_fn, s);
		else if (!aio)
			pthread_create(&s->thread, NULL, submitter_uring_fn, s);
#ifdef CONFIG_LIBAIO
		else
			pthread_create(&s->thread, NULL, submitter_aio_fn, s);
#endif
	}

	fdepths = malloc(8 * s->nr_files * nthreads);
	reap = calls = done = 0;
	do {
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long rpc = 0, ipc = 0;
		unsigned long iops, bw;

		sleep(1);
		if (runtime && !--runtime)
			do_finish("timeout");

		/* don't print partial run, if interrupted by signal */
		if (finish)
			break;

		/* one second in to the run, enable stats */
		if (stats)
			stats_running = 1;

		for (j = 0; j < nthreads; j++) {
			s = get_submitter(j);
			this_done += s->done;
			this_call += s->calls;
			this_reap += s->reaps;
		}
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);
		} else
			rpc = ipc = -1;
		file_depths(fdepths);
		iops = this_done - done;
		if (bs > 1048576)
			bw = iops * (bs / 1048576);
		else
			bw = iops / (1048576 / bs);
		if (iops > 100000)
			printf("IOPS=%luK, ", iops / 1000);
		else
			printf("IOPS=%lu, ", iops);
		max_iops = max(max_iops, iops);
		if (!do_nop)
			printf("BW=%luMiB/s, ", bw);
		printf("IOS/call=%ld/%ld, inflight=(%s)\n", rpc, ipc, fdepths);
		done = this_done;
		calls = this_call;
		reap = this_reap;
	} while (!finish);
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_join(s->thread, &ret);
		close(s->ring_fd);

		if (stats) {
			unsigned long nr;

			printf("%d: Latency percentiles:\n", s->tid);
			for (i = 0, nr = 0; i < PLAT_NR; i++)
				nr += s->plat[i];
			show_clat_percentiles(s->plat, nr, 4);
			free(s->plat);
		}
		free(s->clock_batch);
	}
	free(fdepths);
	free(submitter);
	return 0;
}