#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include "../arch/arch.h"
#include "../lib/types.h"
#include "../lib/roundup.h"
#include "../minmax.h"
#include "../os/linux/io_uring.h"
        unsigned *ring_entries;

        unsigned *ring_entries;
        struct io_uring_cqe *cqes;
#define BATCH_SUBMIT 32
#define BATCH_COMPLETE 32

static unsigned sq_ring_mask, cq_ring_mask;

        unsigned long max_blocks;

#define PLAT_VAL (1 << PLAT_BITS)
#define PLAT_GROUP_NR 29
#define PLAT_NR (PLAT_GROUP_NR * PLAT_VAL)
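/*
 * Editorial note (not in the original source): with PLAT_BITS of 6, as fio
 * uses, PLAT_VAL is 64 and PLAT_NR is 29 * 64 = 1856 buckets. Each
 * power-of-two range of the sample forms a group subdivided into PLAT_VAL
 * linear buckets, bounding the relative quantization error by 1/PLAT_VAL
 * (~1.6%) regardless of the sample's magnitude.
 */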
        struct io_sq_ring sq_ring;
        struct io_uring_sqe *sqes;
        struct io_cq_ring cq_ring;
        unsigned long *clock_batch;
        struct file files[MAX_FDS];
        struct iovec iovecs[];
static struct submitter *submitter;
static volatile int finish;
static int stats_running;

static int depth = DEPTH;
static int batch_submit = BATCH_SUBMIT;
static int batch_complete = BATCH_COMPLETE;
static int polled = 1;          /* use IO polling */
static int fixedbufs = 1;       /* use fixed user buffers */
static int register_files = 1;  /* use fixed files */
static int buffered = 0;        /* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;  /* use kernel submission/poller thread */
static int sq_thread_cpu = -1;  /* pin above thread to this CPU */
static int do_nop = 0;          /* no-op SQ ring commands */
static int nthreads = 1;
static int stats = 0;           /* generate IO stats */
static int aio = 0;             /* use libaio */
static int runtime = 0;         /* runtime in seconds, 0 == run until interrupted */
static unsigned long tsc_rate;

#define TSC_RATE_FILE "tsc-rate"

static int vectored = 1;

static float plist[] = { 1.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0,
                        80.0, 90.0, 95.0, 99.0, 99.5, 99.9, 99.95, 99.99 };
static int plist_len = 17;
static unsigned long cycles_to_nsec(unsigned long cycles)
{
        unsigned long val;

        if (!tsc_rate)
                return cycles;

        val = cycles * 1000000000ULL;
        return val / tsc_rate;
}
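/*
 * Worked example (illustrative): with a hypothetical tsc_rate of 3 GHz
 * loaded from TSC_RATE_FILE, 300 cycles * 1000000000 / 3000000000 = 100
 * nsec. With no rate known, raw ticks are returned, which is why
 * show_clat_percentiles() labels that case "tsc ticks".
 */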
static unsigned long plat_idx_to_val(unsigned int idx)
{
        unsigned int error_bits;
        unsigned long k, base;

        assert(idx < PLAT_NR);

        /* MSB <= (PLAT_BITS-1), cannot be rounded off. Use
         * all bits of the sample as index */
        if (idx < (PLAT_VAL << 1))
                return cycles_to_nsec(idx);

        /* Find the group and compute the minimum value of that group */
        error_bits = (idx >> PLAT_BITS) - 1;
        base = ((unsigned long) 1) << (error_bits + PLAT_BITS);

        /* Find its bucket number of the group */
        k = idx % PLAT_VAL;

        /* Return the mean of the range of the bucket */
        return cycles_to_nsec(base + ((k + 0.5) * (1 << error_bits)));
}
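/*
 * Worked example (illustrative, assuming PLAT_BITS == 6): idx = 200 gives
 * error_bits = (200 >> 6) - 1 = 2, base = 1 << 8 = 256 and k = 200 % 64 = 8,
 * so the bucket midpoint is 256 + 8.5 * 4 = 290 cycles before the nsec
 * conversion.
 */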
unsigned int calc_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
                                   unsigned long **output,
                                   unsigned long *maxv, unsigned long *minv)
{
        unsigned long sum = 0;
        unsigned int len = plist_len, i, j = 0;
        unsigned long *ovals = NULL;
        bool is_last = false;

        *minv = -1UL;
        *maxv = 0;

        ovals = malloc(len * sizeof(*ovals));
        if (!ovals)
                return 0;

        /*
         * Calculate bucket values, note down max and min values
         */
        for (i = 0; i < PLAT_NR && !is_last; i++) {
                sum += io_u_plat[i];
                while (sum >= ((long double) plist[j] / 100.0 * nr)) {
                        assert(plist[j] <= 100.0);

                        ovals[j] = plat_idx_to_val(i);
                        if (ovals[j] < *minv)
                                *minv = ovals[j];
                        if (ovals[j] > *maxv)
                                *maxv = ovals[j];

                        is_last = (j == len - 1) != 0;
                        if (is_last)
                                break;
                        j++;
                }
        }

        if (!is_last)
                fprintf(stderr, "error calculating latency percentiles\n");

        *output = ovals;
        return len;
}
static void show_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
                                  unsigned int precision)
{
        unsigned int divisor, len, i, j = 0;
        unsigned long minv, maxv;
        unsigned long *ovals = NULL;
        int per_line, scale_down, time_width;
        bool is_last;
        char fmt[32];

        len = calc_clat_percentiles(io_u_plat, nr, &ovals, &maxv, &minv);
        if (!len || !ovals)
                goto out;

        if (!tsc_rate) {
                scale_down = 0;
                divisor = 1;
                printf("    percentiles (tsc ticks):\n    |");
        } else if (minv > 2000 && maxv > 99999) {
                scale_down = 1;
                divisor = 1000;
                printf("    percentiles (usec):\n    |");
        } else {
                scale_down = 0;
                divisor = 1;
                printf("    percentiles (nsec):\n    |");
        }

        time_width = max(5, (int) (log10(maxv / divisor) + 1));
        snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
                        precision, time_width);
        /* fmt will be something like " %5.2fth=[%4llu]%c" */
        per_line = (80 - 7) / (precision + 10 + time_width);

        for (j = 0; j < len; j++) {
                /* for formatting */
                if (j != 0 && (j % per_line) == 0)
                        printf("    |");

                /* end of the list */
                is_last = (j == len - 1) != 0;

                for (i = 0; i < scale_down; i++)
                        ovals[j] = (ovals[j] + 999) / 1000;

                printf(fmt, plist[j], ovals[j], is_last ? '\n' : ',');

                if (is_last)
                        break;

                if ((j % per_line) == per_line - 1)     /* for formatting */
                        printf("\n");
        }

out:
        free(ovals);
}
static unsigned int plat_val_to_idx(unsigned long val)
{
        unsigned int msb, error_bits, base, offset, idx;

        /* Find MSB starting from bit 0 */
        if (val == 0)
                msb = 0;
        else
                msb = (sizeof(val)*8) - __builtin_clzll(val) - 1;

        /*
         * MSB <= (PLAT_BITS-1), cannot be rounded off. Use
         * all bits of the sample as index
         */
        if (msb <= PLAT_BITS)
                return val;

        /* Compute the number of error bits to discard */
        error_bits = msb - PLAT_BITS;

        /* Compute the number of buckets before the group */
        base = (error_bits + 1) << PLAT_BITS;

        /*
         * Discard the error bits and apply the mask to find the
         * index for the buckets in the group
         */
        offset = (PLAT_VAL - 1) & (val >> error_bits);

        /* Make sure the index does not exceed (array size - 1) */
        idx = (base + offset) < (PLAT_NR - 1) ?
                (base + offset) : (PLAT_NR - 1);

        return idx;
}
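/*
 * Round-trip check (illustrative, assuming PLAT_BITS == 6): val = 290 has
 * msb = 8, so error_bits = 2, base = 3 << 6 = 192 and offset =
 * 63 & (290 >> 2) = 8, giving idx = 200 -- the same bucket whose midpoint
 * plat_idx_to_val() computes above. At most error_bits low bits of the
 * sample are discarded.
 */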
static void add_stat(struct submitter *s, int clock_index, int nr)
{
#ifdef ARCH_HAVE_CPU_CLOCK
        unsigned long cycles;
        unsigned int pidx;

        if (!s->finish && clock_index) {
                cycles = get_cpu_clock();
                cycles -= s->clock_batch[clock_index];
                pidx = plat_val_to_idx(cycles);
                s->plat[pidx] += nr;
        }
#endif
}
static int io_uring_register_buffers(struct submitter *s)
{
        return syscall(__NR_io_uring_register, s->ring_fd,
                        IORING_REGISTER_BUFFERS, s->iovecs, depth);
}
static int io_uring_register_files(struct submitter *s)
{
        unsigned i;

        s->fds = calloc(s->nr_files, sizeof(__s32));
        for (i = 0; i < s->nr_files; i++) {
                s->fds[i] = s->files[i].real_fd;
                s->files[i].fixed_fd = i;
        }

        return syscall(__NR_io_uring_register, s->ring_fd,
                        IORING_REGISTER_FILES, s->fds, s->nr_files);
}
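/*
 * Editorial note: registering buffers and files once up front lets the
 * kernel skip per-IO page pinning and per-IO fd lookup. The registered
 * iovecs are addressed by index via IORING_OP_READ_FIXED, and registered
 * files via the IOSQE_FIXED_FILE flag, both set up in init_io() below.
 */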
static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
        return syscall(__NR_io_uring_setup, entries, p);
}
static void io_uring_probe(int fd)
{
        struct io_uring_probe *p;
        int ret;

        p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
        if (!p)
                return;

        memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
        ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256);
        if (ret < 0)
                goto out;

        if (IORING_OP_READ > p->ops_len)
                goto out;

        if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
                vectored = 0;
out:
        free(p);
}
static int io_uring_enter(struct submitter *s, unsigned int to_submit,
                          unsigned int min_complete, unsigned int flags)
{
        return syscall(__NR_io_uring_enter, s->ring_fd, to_submit, min_complete,
                        flags);
}
#ifndef CONFIG_HAVE_GETTID
static int gettid(void)
{
        return syscall(__NR_gettid);
}
#endif
static unsigned file_depth(struct submitter *s)
{
        return (depth + s->nr_files - 1) / s->nr_files;
}
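/*
 * Example (illustrative): depth = 128 over 3 files caps each file at
 * (128 + 3 - 1) / 3 = 43 in-flight IOs; rounding up means the per-file
 * caps always sum to at least the full queue depth.
 */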
static void init_io(struct submitter *s, unsigned index)
{
        struct io_uring_sqe *sqe = &s->sqes[index];
        unsigned long offset;
        struct file *f;
        long r;

        if (do_nop) {
                sqe->opcode = IORING_OP_NOP;
                return;
        }

        if (s->nr_files == 1) {
                f = &s->files[0];
        } else {
                f = &s->files[s->cur_file];
                if (f->pending_ios >= file_depth(s)) {
                        s->cur_file++;
                        if (s->cur_file == s->nr_files)
                                s->cur_file = 0;
                        f = &s->files[s->cur_file];
                }
        }
        f->pending_ios++;

        r = lrand48();
        offset = (r % (f->max_blocks - 1)) * bs;

        if (register_files) {
                sqe->flags = IOSQE_FIXED_FILE;
                sqe->fd = f->fixed_fd;
        } else {
                sqe->flags = 0;
                sqe->fd = f->real_fd;
        }
        if (fixedbufs) {
                sqe->opcode = IORING_OP_READ_FIXED;
                sqe->addr = (unsigned long) s->iovecs[index].iov_base;
                sqe->len = bs;
                sqe->buf_index = index;
        } else if (!vectored) {
                sqe->opcode = IORING_OP_READ;
                sqe->addr = (unsigned long) s->iovecs[index].iov_base;
                sqe->len = bs;
        } else {
                sqe->opcode = IORING_OP_READV;
                sqe->addr = (unsigned long) &s->iovecs[index];
                sqe->len = 1;
        }
        sqe->off = offset;
        sqe->user_data = (unsigned long) f->fileno;
        if (stats && stats_running)
                sqe->user_data |= ((unsigned long)s->clock_index << 32);
}
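/*
 * Editorial note: user_data packs two fields -- the low 32 bits hold the
 * file index, so the completion side can decrement the right pending_ios,
 * and the high 32 bits hold the clock_batch slot of the submission
 * timestamp, so reap_events_uring() can compute a latency sample.
 * prep_more_ios_aio() packs iocb->data the same way for the libaio path.
 */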
static int prep_more_ios_uring(struct submitter *s, int max_ios)
{
        struct io_sq_ring *ring = &s->sq_ring;
        unsigned index, tail, next_tail, prepped = 0;

        next_tail = tail = *ring->tail;
        do {
                next_tail++;
                if (next_tail == atomic_load_acquire(ring->head))
                        break;

                index = tail & sq_ring_mask;
                init_io(s, index);
                ring->array[index] = index;
                prepped++;
                tail = next_tail;
        } while (prepped < max_ios);

        if (prepped)
                atomic_store_release(ring->tail, tail);
        return prepped;
}
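/*
 * Editorial note: this is the standard io_uring SQ producer protocol. The
 * acquire load of head checks ring-full against the kernel's consumer
 * index, and the single release store of tail publishes all prepared SQE
 * slots at once; the kernel pairs it with an acquire load of tail, so no
 * heavier barriers are required.
 */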
static int get_file_size(struct file *f)
{
        struct stat st;

        if (fstat(f->real_fd, &st) < 0)
                return -1;
        if (S_ISBLK(st.st_mode)) {
                unsigned long long bytes;

                if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
                        return -1;

                f->max_blocks = bytes / bs;
                return 1;
        } else if (S_ISREG(st.st_mode)) {
                f->max_blocks = st.st_size / bs;
                return 1;
        }

        return 0;
}
static int reap_events_uring(struct submitter *s)
{
        struct io_cq_ring *ring = &s->cq_ring;
        struct io_uring_cqe *cqe;
        unsigned head, reaped = 0;
        int last_idx = -1, stat_nr = 0;

        head = *ring->head;
        do {
                struct file *f;

                if (head == atomic_load_acquire(ring->tail))
                        break;
                cqe = &ring->cqes[head & cq_ring_mask];
                if (!do_nop) {
                        int fileno = cqe->user_data & 0xffffffff;

                        f = &s->files[fileno];
                        f->pending_ios--;
                        if (cqe->res != bs) {
                                printf("io: unexpected ret=%d\n", cqe->res);
                                if (polled && cqe->res == -EOPNOTSUPP)
                                        printf("Your filesystem/driver/kernel doesn't support polled IO\n");
                                return -1;
                        }
                }
                if (stats) {
                        int clock_index = cqe->user_data >> 32;

                        if (last_idx != clock_index) {
                                if (last_idx != -1) {
                                        add_stat(s, last_idx, stat_nr);
                                        stat_nr = 0;
                                }
                                last_idx = clock_index;
                        }
                        stat_nr++;
                }
                reaped++;
                head++;
        } while (1);

        if (stat_nr)
                add_stat(s, last_idx, stat_nr);

        if (reaped) {
                s->inflight -= reaped;
                atomic_store_release(ring->head, head);
        }
        return reaped;
}
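/*
 * Editorial note: the mirror-image CQ consumer protocol -- an acquire
 * load of the kernel-written tail decides whether a CQE is ready, and one
 * release store of head after the whole batch returns the consumed
 * entries, amortizing the barrier over all reaped completions.
 */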
static int submitter_init(struct submitter *s)
{
        int i, nr_batch;

        s->tid = gettid();
        printf("submitter=%d, tid=%d\n", s->index, s->tid);

        srand48(pthread_self());

        for (i = 0; i < MAX_FDS; i++)
                s->files[i].fileno = i;

        if (stats) {
                nr_batch = roundup_pow2(depth / batch_submit);
                s->clock_batch = calloc(nr_batch, sizeof(unsigned long));
                s->plat = calloc(PLAT_NR, sizeof(unsigned long));
        } else {
                s->clock_batch = NULL;
                s->plat = NULL;
                nr_batch = 0;
        }

        return nr_batch;
}
static int prep_more_ios_aio(struct submitter *s, int max_ios, struct iocb *iocbs)
{
        unsigned long offset, data;
        struct file *f;
        unsigned index;
        long r;

        index = 0;
        while (index < max_ios) {
                struct iocb *iocb = &iocbs[index];

                if (s->nr_files == 1) {
                        f = &s->files[0];
                } else {
                        f = &s->files[s->cur_file];
                        if (f->pending_ios >= file_depth(s)) {
                                s->cur_file++;
                                if (s->cur_file == s->nr_files)
                                        s->cur_file = 0;
                                f = &s->files[s->cur_file];
                        }
                }
                f->pending_ios++;

                r = lrand48();
                offset = (r % (f->max_blocks - 1)) * bs;
                io_prep_pread(iocb, f->real_fd, s->iovecs[index].iov_base,
                                s->iovecs[index].iov_len, offset);

                data = f->fileno;
                if (stats && stats_running)
                        data |= ((unsigned long) s->clock_index << 32);
                iocb->data = (void *) (uintptr_t) data;
                index++;
        }

        return index;
}
static int reap_events_aio(struct submitter *s, struct io_event *events, int evs)
{
        int last_idx = -1, stat_nr = 0;
        int reaped = 0;

        while (evs) {
                unsigned long data = (uintptr_t) events[reaped].data;
                struct file *f = &s->files[data & 0xffffffff];

                f->pending_ios--;
                if (events[reaped].res != bs) {
                        printf("io: unexpected ret=%ld\n", events[reaped].res);
                        return -1;
                }
                if (stats) {
                        int clock_index = data >> 32;

                        if (last_idx != clock_index) {
                                if (last_idx != -1) {
                                        add_stat(s, last_idx, stat_nr);
                                        stat_nr = 0;
                                }
                                last_idx = clock_index;
                        }
                        stat_nr++;
                }
                reaped++;
                evs--;
        }

        if (stat_nr)
                add_stat(s, last_idx, stat_nr);

        s->inflight -= reaped;
        return reaped;
}
static void *submitter_aio_fn(void *data)
{
        struct submitter *s = data;
        int i, ret, prepped = 0, nr_batch;
        struct iocb **iocbsptr;
        struct iocb *iocbs;
        struct io_event *events;

        nr_batch = submitter_init(s);

        iocbsptr = calloc(depth, sizeof(struct iocb *));
        iocbs = calloc(depth, sizeof(struct iocb));
        events = calloc(depth, sizeof(struct io_event));

        for (i = 0; i < depth; i++)
                iocbsptr[i] = &iocbs[i];

        do {
                int to_wait, to_submit, to_prep;

                if (!prepped && s->inflight < depth) {
                        to_prep = min(depth - s->inflight, batch_submit);
                        prepped = prep_more_ios_aio(s, to_prep, iocbs);
#ifdef ARCH_HAVE_CPU_CLOCK
                        if (prepped && stats) {
                                s->clock_batch[s->clock_index] = get_cpu_clock();
                                s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
                        }
#endif
                }
                s->inflight += prepped;
                to_submit = prepped;

                if (to_submit && (s->inflight + to_submit <= depth))
                        to_wait = 0;
                else
                        to_wait = min(s->inflight + to_submit, batch_complete);

                ret = io_submit(s->aio_ctx, to_submit, iocbsptr);
                if (ret < 0) {
                        perror("io_submit");
                        break;
                } else if (ret != to_submit) {
                        printf("submitted %d, wanted %d\n", ret, to_submit);
                        break;
                }
                prepped = 0;

                while (to_wait) {
                        int r;

                        r = io_getevents(s->aio_ctx, to_wait, to_wait, events, NULL);
                        if (r < 0) {
                                perror("io_getevents");
                                break;
                        } else if (r != to_wait) {
                                printf("r=%d, wait=%d\n", r, to_wait);
                                break;
                        }
                        r = reap_events_aio(s, events, r);
                        s->reaps += r;
                        to_wait -= r;
                }
        } while (!s->finish);

        free(iocbsptr);
        free(iocbs);
        free(events);
        return NULL;
}
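/*
 * Editorial note: the libaio path is the classic two-syscall cycle --
 * io_submit() per batch, then io_getevents() to retrieve completions --
 * whereas submitter_uring_fn() below can combine submission and waiting
 * in a single io_uring_enter() call, or avoid the syscall entirely with
 * SQPOLL.
 */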
static void *submitter_uring_fn(void *data)
{
        struct submitter *s = data;
        struct io_sq_ring *ring = &s->sq_ring;
        int ret, prepped = 0, nr_batch;

        nr_batch = submitter_init(s);

        do {
                int to_wait, to_submit, this_reap, to_prep;
                unsigned ring_flags = 0;

                if (!prepped && s->inflight < depth) {
                        to_prep = min(depth - s->inflight, batch_submit);
                        prepped = prep_more_ios_uring(s, to_prep);
#ifdef ARCH_HAVE_CPU_CLOCK
                        if (prepped && stats) {
                                s->clock_batch[s->clock_index] = get_cpu_clock();
                                s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
                        }
#endif
                }
                s->inflight += prepped;
submit_more:
                to_submit = prepped;
submit:
                if (to_submit && (s->inflight + to_submit <= depth))
                        to_wait = 0;
                else
                        to_wait = min(s->inflight + to_submit, batch_complete);

                /*
                 * Only need to call io_uring_enter if we're not using SQ thread
                 * poll, or if IORING_SQ_NEED_WAKEUP is set.
                 */
                ring_flags = atomic_load_acquire(ring->flags);
                if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
                        unsigned flags = 0;

                        if (to_wait)
                                flags = IORING_ENTER_GETEVENTS;
                        if (ring_flags & IORING_SQ_NEED_WAKEUP)
                                flags |= IORING_ENTER_SQ_WAKEUP;
                        ret = io_uring_enter(s, to_submit, to_wait, flags);
                        s->calls++;
                } else {
                        /* for SQPOLL, we submitted it all effectively */
                        ret = to_submit;
                }

                /*
                 * For non SQ thread poll, we already got the events we needed
                 * through the io_uring_enter() above. For SQ thread poll, we
                 * need to loop here until we find enough events.
                 */
                this_reap = 0;
                do {
                        int r;

                        r = reap_events_uring(s);
                        if (r == -1) {
                                s->finish = 1;
                                break;
                        } else if (r > 0)
                                this_reap += r;
                } while (sq_thread_poll && this_reap < to_wait);
                s->reaps += this_reap;

                if (ret >= 0) {
                        if (!ret) {
                                to_submit = 0;
                                if (s->inflight)
                                        goto submit;
                                continue;
                        } else if (ret < to_submit) {
                                int diff = to_submit - ret;

                                s->done += ret;
                                prepped -= diff;
                                goto submit_more;
                        }
                        s->done += ret;
                        prepped = 0;
                        continue;
                } else if (ret < 0) {
                        if (errno == EAGAIN) {
                                if (s->finish)
                                        break;
                                if (this_reap)
                                        goto submit;
                                to_submit = 0;
                                goto submit;
                        }
                        printf("io_submit: %s\n", strerror(errno));
                        break;
                }
        } while (!s->finish);

        return NULL;
}
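/*
 * Editorial note: under IORING_SETUP_SQPOLL the kernel thread consumes
 * the SQ ring on its own, so io_uring_enter() is needed only after the
 * thread has gone idle and set IORING_SQ_NEED_WAKEUP. That is why the
 * loop above rechecks ring->flags with an acquire load on every pass,
 * and why completions must be polled in a loop instead of waited for.
 */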
static struct submitter *get_submitter(int offset)
{
        void *ret = submitter;

        ret += offset * (sizeof(*submitter) + depth * sizeof(struct iovec));
        return ret;
}
static void do_finish(const char *reason)
{
        int j;

        printf("Exiting on %s\n", reason);
        for (j = 0; j < nthreads; j++) {
                struct submitter *s = get_submitter(j);
                s->finish = 1;
        }
        finish = 1;
}
static void sig_int(int sig)
{
        printf("Exiting on signal %d\n", sig);
        do_finish("signal");
}

static void arm_sig_int(void)
{
        struct sigaction act;

        memset(&act, 0, sizeof(act));
        act.sa_handler = sig_int;
        act.sa_flags = SA_RESTART;
        sigaction(SIGINT, &act, NULL);

        /* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
        sigaction(SIGBREAK, &act, NULL);
#endif
}
static int setup_aio(struct submitter *s)
{
#ifdef CONFIG_LIBAIO
        if (polled) {
                fprintf(stderr, "aio does not support polled IO\n");
                polled = 0;
        }
        if (sq_thread_poll) {
                fprintf(stderr, "aio does not support SQPOLL IO\n");
                sq_thread_poll = 0;
        }
        if (do_nop) {
                fprintf(stderr, "aio does not support nop IO\n");
                do_nop = 0;
        }
        if (fixedbufs || register_files) {
                fprintf(stderr, "aio does not support registered files or buffers\n");
                fixedbufs = register_files = 0;
        }

        return io_queue_init(depth, &s->aio_ctx);
#else
        fprintf(stderr, "Legacy AIO not available on this system/build\n");
        return -1;
#endif
}
static int setup_ring(struct submitter *s)
{
        struct io_sq_ring *sring = &s->sq_ring;
        struct io_cq_ring *cring = &s->cq_ring;
        struct io_uring_params p;
        int ret, fd;
        void *ptr;

        memset(&p, 0, sizeof(p));

        if (polled && !do_nop)
                p.flags |= IORING_SETUP_IOPOLL;
        if (sq_thread_poll) {
                p.flags |= IORING_SETUP_SQPOLL;
                if (sq_thread_cpu != -1) {
                        p.flags |= IORING_SETUP_SQ_AFF;
                        p.sq_thread_cpu = sq_thread_cpu;
                }
        }

        fd = io_uring_setup(depth, &p);
        if (fd < 0) {
                perror("io_uring_setup");
                return 1;
        }
        s->ring_fd = fd;

        io_uring_probe(fd);

        if (fixedbufs) {
                struct rlimit rlim;

                rlim.rlim_cur = RLIM_INFINITY;
                rlim.rlim_max = RLIM_INFINITY;
                /* ignore potential error, not needed on newer kernels */
                setrlimit(RLIMIT_MEMLOCK, &rlim);
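                /*
                 * Editorial note: older kernels charge registered-buffer
                 * pinned memory against RLIMIT_MEMLOCK (newer kernels account
                 * it differently), so the limit is raised best-effort here to
                 * keep IORING_REGISTER_BUFFERS from failing with ENOMEM on
                 * those kernels.
                 */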
                ret = io_uring_register_buffers(s);
                if (ret < 0) {
                        perror("io_uring_register_buffers");
                        return 1;
                }
        }

        if (register_files) {
                ret = io_uring_register_files(s);
                if (ret < 0) {
                        perror("io_uring_register_files");
                        return 1;
                }
        }

        ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                        IORING_OFF_SQ_RING);
        sring->head = ptr + p.sq_off.head;
        sring->tail = ptr + p.sq_off.tail;
        sring->ring_mask = ptr + p.sq_off.ring_mask;
        sring->ring_entries = ptr + p.sq_off.ring_entries;
        sring->flags = ptr + p.sq_off.flags;
        sring->array = ptr + p.sq_off.array;
        sq_ring_mask = *sring->ring_mask;

        s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                        IORING_OFF_SQES);

        ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
                        PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                        IORING_OFF_CQ_RING);
        cring->head = ptr + p.cq_off.head;
        cring->tail = ptr + p.cq_off.tail;
        cring->ring_mask = ptr + p.cq_off.ring_mask;
        cring->ring_entries = ptr + p.cq_off.ring_entries;
        cring->cqes = ptr + p.cq_off.cqes;
        cq_ring_mask = *cring->ring_mask;

        return 0;
}
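/*
 * Editorial note: the three mmaps above are the canonical io_uring setup
 * sequence -- one mapping for the SQ ring bookkeeping (IORING_OFF_SQ_RING),
 * one for the SQE array itself (IORING_OFF_SQES), and one for the CQ ring
 * plus its CQEs (IORING_OFF_CQ_RING) -- with every pointer recovered by
 * adding the kernel-reported p.sq_off/p.cq_off offsets to the base.
 */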
static void file_depths(char *buf)
{
        bool prev = false;
        char *p;
        int i, j;

        buf[0] = '\0';
        p = buf;
        for (j = 0; j < nthreads; j++) {
                struct submitter *s = get_submitter(j);

                for (i = 0; i < s->nr_files; i++) {
                        struct file *f = &s->files[i];

                        if (prev)
                                p += sprintf(p, " %d", f->pending_ios);
                        else
                                p += sprintf(p, "%d", f->pending_ios);
                        prev = true;
                }
        }
}
static void usage(char *argv, int status)
{
        char runtime_str[16];

        snprintf(runtime_str, sizeof(runtime_str), "%d", runtime);
        printf("%s [options] -- [filenames]\n"
                " -d <int>  : IO Depth, default %d\n"
                " -s <int>  : Batch submit, default %d\n"
                " -c <int>  : Batch complete, default %d\n"
                " -b <int>  : Block size, default %d\n"
                " -p <bool> : Polled IO, default %d\n"
                " -B <bool> : Fixed buffers, default %d\n"
                " -F <bool> : Register files, default %d\n"
                " -n <int>  : Number of threads, default %d\n"
                " -O <bool> : Use O_DIRECT, default %d\n"
                " -N <bool> : Perform just no-op requests, default %d\n"
                " -t <bool> : Track IO latencies, default %d\n"
                " -T <int>  : TSC rate in HZ\n"
                " -a <bool> : Use legacy aio, default %d\n"
                " -r <int>  : Runtime in seconds, default %s\n",
                argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
                fixedbufs, register_files, nthreads, !buffered, do_nop, stats, aio,
                runtime == 0 ? "unlimited" : runtime_str);
        exit(status);
}
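/*
 * Example invocation (illustrative; the device name is hypothetical):
 *
 *      t/io_uring -d 128 -s 32 -c 32 -p 1 /dev/nvme0n1
 *
 * i.e. the defaults spelled out: QD 128, submit and complete in batches
 * of 32, polled completions, one block device, running until interrupted.
 */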
static void read_tsc_rate(void)
{
        char buffer[32];
        int fd, ret;

        if (tsc_rate)
                return;

        fd = open(TSC_RATE_FILE, O_RDONLY);
        if (fd < 0)
                return;

        ret = read(fd, buffer, sizeof(buffer));
        if (ret <= 0) {
                close(fd);
                return;
        }

        tsc_rate = strtoul(buffer, NULL, 10);
        printf("Using TSC rate %luHz\n", tsc_rate);
        close(fd);
}
static void write_tsc_rate(void)
{
        char buffer[32];
        struct stat sb;
        int fd, ret;

        if (!stat(TSC_RATE_FILE, &sb))
                return;

        fd = open(TSC_RATE_FILE, O_WRONLY | O_CREAT, 0644);
        if (fd < 0)
                return;

        memset(buffer, 0, sizeof(buffer));
        sprintf(buffer, "%lu", tsc_rate);
        ret = write(fd, buffer, strlen(buffer));
        if (ret < 0)
                perror("write");
        close(fd);
}
int main(int argc, char *argv[])
{
        struct submitter *s;
        unsigned long done, calls, reap;
        int err, i, j, flags, fd, opt, threads_per_f, threads_rem = 0, nfiles;
        char *fdepths;

        if (!do_nop && argc < 2)
                usage(argv[0], 1);

        while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:t:T:a:r:h?")) != -1) {
                switch (opt) {
                case 'a':
                        aio = !!atoi(optarg);
                        break;
                case 'd':
                        depth = atoi(optarg);
                        break;
                case 's':
                        batch_submit = atoi(optarg);
                        break;
                case 'c':
                        batch_complete = atoi(optarg);
                        if (!batch_complete)
                                batch_complete = 1;
                        break;
                case 'b':
                        bs = atoi(optarg);
                        break;
                case 'p':
                        polled = !!atoi(optarg);
                        break;
                case 'B':
                        fixedbufs = !!atoi(optarg);
                        break;
                case 'F':
                        register_files = !!atoi(optarg);
                        break;
                case 'n':
                        nthreads = atoi(optarg);
                        if (!nthreads) {
                                printf("Threads must be non-zero\n");
                                usage(argv[0], 1);
                        }
                        break;
                case 'N':
                        do_nop = !!atoi(optarg);
                        break;
                case 'O':
                        buffered = !atoi(optarg);
                        break;
                case 't':
#ifndef ARCH_HAVE_CPU_CLOCK
                        fprintf(stderr, "Stats not supported on this CPU\n");
                        return 1;
#endif
                        stats = !!atoi(optarg);
                        break;
                case 'T':
#ifndef ARCH_HAVE_CPU_CLOCK
                        fprintf(stderr, "Stats not supported on this CPU\n");
                        return 1;
#endif
                        tsc_rate = strtoul(optarg, NULL, 10);
                        break;
                case 'r':
                        runtime = atoi(optarg);
                        break;
                case 'h':
                case '?':
                default:
                        usage(argv[0], 0);
                        break;
                }
        }
        if (batch_complete > depth)
                batch_complete = depth;
        if (batch_submit > depth)
                batch_submit = depth;

        submitter = calloc(nthreads, sizeof(*submitter) +
                                depth * sizeof(struct iovec));
        for (j = 0; j < nthreads; j++) {
                s = get_submitter(j);
                s->index = j;
                s->done = s->calls = s->reaps = 0;
        }

        flags = O_RDONLY | O_NOATIME;
        if (!buffered)
                flags |= O_DIRECT;

        nfiles = argc - optind;
        if (!do_nop && !nfiles) {
                printf("No files specified\n");
                usage(argv[0], 1);
        }

        threads_per_f = nthreads / nfiles;
        /* make sure each thread gets assigned files */
        if (threads_per_f == 0) {
                threads_per_f = 1;
                threads_rem = 0;
        } else
                threads_rem = nthreads - threads_per_f * nfiles;

        i = optind;
        j = 0;
        while (!do_nop && i < argc) {
                struct file f;
                int k, limit;

                memset(&f, 0, sizeof(f));

                fd = open(argv[i], flags);
                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                f.real_fd = fd;
                if (get_file_size(&f)) {
                        printf("failed getting size of device/file\n");
                        return 1;
                }
                if (f.max_blocks <= 1) {
                        printf("Zero file/device size?\n");
                        return 1;
                }

                limit = threads_per_f;
                limit += threads_rem > 0 ? 1 : 0;
                for (k = 0; k < limit; k++) {
                        s = get_submitter((j + k) % nthreads);

                        if (s->nr_files == MAX_FDS) {
                                printf("Max number of files (%d) reached\n", MAX_FDS);
                                break;
                        }
                        memcpy(&s->files[s->nr_files], &f, sizeof(f));
                        s->nr_files++;
                        printf("Added file %s (submitter %d)\n", argv[i], s->index);
                }
                i++;
        }
        for (j = 0; j < nthreads; j++) {
                s = get_submitter(j);

                for (i = 0; i < depth; i++) {
                        void *buf;

                        if (posix_memalign(&buf, bs, bs)) {
                                printf("failed alloc\n");
                                return 1;
                        }
                        s->iovecs[i].iov_base = buf;
                        s->iovecs[i].iov_len = bs;
                }
        }

        for (j = 0; j < nthreads; j++) {
                s = get_submitter(j);

                if (!aio)
                        err = setup_ring(s);
                else
                        err = setup_aio(s);
                if (err) {
                        printf("ring setup failed: %s, %d\n", strerror(errno), err);
                        return 1;
                }
        }

        s = get_submitter(0);
        printf("polled=%d, fixedbufs=%d, register_files=%d, buffered=%d, QD=%d\n",
                        polled, fixedbufs, register_files, buffered, depth);
        if (!aio)
                printf("Engine=io_uring, sq_ring=%d, cq_ring=%d\n",
                                *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
        else
                printf("Engine=aio\n");
        for (j = 0; j < nthreads; j++) {
                s = get_submitter(j);
                if (!aio)
                        pthread_create(&s->thread, NULL, submitter_uring_fn, s);
#ifdef CONFIG_LIBAIO
                else
                        pthread_create(&s->thread, NULL, submitter_aio_fn, s);
#endif
        }

        fdepths = malloc(8 * s->nr_files * nthreads);
        reap = calls = done = 0;
        do {
                unsigned long this_done = 0;
                unsigned long this_reap = 0;
                unsigned long this_call = 0;
                unsigned long rpc = 0, ipc = 0;
                unsigned long iops, bw;
                sleep(1);

                if (runtime && !--runtime)
                        do_finish("timeout");

                /* don't print partial run, if interrupted by signal */
                if (finish)
                        break;

                /* one second in to the run, enable stats */
                if (stats)
                        stats_running = 1;

                for (j = 0; j < nthreads; j++) {
                        s = get_submitter(j);
                        this_done += s->done;
                        this_call += s->calls;
                        this_reap += s->reaps;
                }
                if (this_call - calls) {
                        rpc = (this_done - done) / (this_call - calls);
                        ipc = (this_reap - reap) / (this_call - calls);
                }

                file_depths(fdepths);
                iops = this_done - done;
                if (bs > 1048576)
                        bw = iops * (bs / 1048576);
                else
                        bw = iops / (1048576 / bs);
                if (iops > 100000)
                        printf("IOPS=%luK, ", iops / 1000);
                else
                        printf("IOPS=%lu, ", iops);
                printf("BW=%luMiB/s, ", bw);
                printf("IOS/call=%ld/%ld, inflight=(%s)\n", rpc, ipc, fdepths);
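                /*
                 * Worked example (illustrative): with the default 4096-byte
                 * block size, 1048576 / 4096 = 256, so iops = 512000 in a
                 * one-second window prints as "IOPS=512K, BW=2000MiB/s"
                 * (512000 / 256 = 2000).
                 */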
                done = this_done;
                calls = this_call;
                reap = this_reap;
        } while (!finish);

        for (j = 0; j < nthreads; j++) {
                void *ret;

                s = get_submitter(j);
                pthread_join(s->thread, &ret);

                if (stats) {
                        unsigned long nr;

                        printf("%d: Latency percentiles:\n", s->tid);
                        for (i = 0, nr = 0; i < PLAT_NR; i++)
                                nr += s->plat[i];
                        show_clat_percentiles(s->plat, nr, 4);
                        free(s->clock_batch);
                        free(s->plat);
                }
        }

        free(fdepths);
        return 0;
}