#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include "../arch/arch.h"
#include "../lib/types.h"
#include "../lib/roundup.h"
#include "../lib/rand.h"
#include "../minmax.h"
#include "../os/linux/io_uring.h"
#include "../engines/nvme.h"
	unsigned *ring_entries;

	unsigned *ring_entries;
	struct io_uring_cqe *cqes;

#define BATCH_SUBMIT	32
#define BATCH_COMPLETE	32

static unsigned sq_ring_mask, cq_ring_mask;

	unsigned long max_blocks;
	unsigned long max_size;
	unsigned long cur_off;
	unsigned int nsid;	/* nsid field required for nvme-passthrough */
	unsigned int lba_shift;	/* lba_shift field required for nvme-passthrough */
#define PLAT_VAL	(1 << PLAT_BITS)
#define PLAT_GROUP_NR	29
#define PLAT_NR		(PLAT_GROUP_NR * PLAT_VAL)
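/*
 * Log-linear latency histogram: PLAT_GROUP_NR groups of PLAT_VAL linearly
 * spaced buckets each, the classic fio bucketing scheme. With PLAT_BITS = 6,
 * say, PLAT_VAL = 64 and PLAT_NR = 29 * 64 = 1856 buckets, and samples up to
 * roughly 2^34 cycles land in the last group; anything larger is clamped to
 * the final bucket by plat_val_to_idx().
 */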
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct io_cq_ring cq_ring;
	struct taus258_state rand_state;
	unsigned long *clock_batch;
	io_context_t aio_ctx;
	const char *filename;
	struct file files[MAX_FDS];
	struct iovec iovecs[];

static struct submitter *submitter;
static volatile int finish;
static int stats_running;
static unsigned long max_iops;
static long t_io_uring_page_size;

static int depth = DEPTH;
static int batch_submit = BATCH_SUBMIT;
static int batch_complete = BATCH_COMPLETE;
static int polled = 1;		/* use IO polling */
static int fixedbufs = 1;	/* use fixed user buffers */
static int dma_map;		/* pre-map DMA buffers */
static int register_files = 1;	/* use fixed files */
static int buffered = 0;	/* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;	/* use kernel submission/poller thread */
static int sq_thread_cpu = -1;	/* pin above thread to this CPU */
static int do_nop = 0;		/* no-op SQ ring commands */
static int nthreads = 1;
static int stats = 0;		/* generate IO stats */
static int aio = 0;		/* use libaio */
static int runtime = 0;		/* runtime in seconds, 0 = unlimited */
static int random_io = 1;	/* random or sequential IO */
static int register_ring = 1;	/* register ring */
static int use_sync = 0;	/* use preadv2 */
static int numa_placement = 0;	/* set to node of device */
static int pt = 0;		/* passthrough I/O or not */
static unsigned long tsc_rate;

#define TSC_RATE_FILE	"tsc-rate"

static int vectored = 1;

static float plist[] = { 1.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0,
			80.0, 90.0, 95.0, 99.0, 99.5, 99.9, 99.95, 99.99 };
static int plist_len = 17;
#ifndef IORING_REGISTER_MAP_BUFFERS
#define IORING_REGISTER_MAP_BUFFERS	22
struct io_uring_map_buffers {

static int nvme_identify(int fd, __u32 nsid, enum nvme_identify_cns cns,
			enum nvme_csi csi, void *data)
	struct nvme_passthru_cmd cmd = {
		.opcode		= nvme_admin_identify,
		.addr		= (__u64)(uintptr_t)data,
		.data_len	= NVME_IDENTIFY_DATA_SIZE,
		.cdw11		= csi << NVME_IDENTIFY_CSI_SHIFT,
		.timeout_ms	= NVME_DEFAULT_IOCTL_TIMEOUT,

	return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
static int nvme_get_info(int fd, __u32 *nsid, __u32 *lba_sz, __u64 *nlba)
	struct nvme_id_ns ns;

	namespace_id = ioctl(fd, NVME_IOCTL_ID);
	if (namespace_id < 0) {
		fprintf(stderr, "error: failed to fetch namespace-id\n");

	/*
	 * Identify namespace to get namespace-id, namespace size in LBAs
	 */
	err = nvme_identify(fd, namespace_id, NVME_IDENTIFY_CNS_NS,
		fprintf(stderr, "error: failed to fetch identify namespace\n");

	*nsid = namespace_id;
	*lba_sz = 1 << ns.lbaf[(ns.flbas & 0x0f)].ds;
static unsigned long cycles_to_nsec(unsigned long cycles)
	val = cycles * 1000000000ULL;
	return val / tsc_rate;
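/*
 * Example: at tsc_rate = 3,000,000,000 Hz, 12000 cycles convert to
 * 12000 * 10^9 / (3 * 10^9) = 4000 nsec. Note the multiply overflows
 * 64 bits once cycles exceeds about 1.8 * 10^10 (a few seconds at GHz
 * rates), which is harmless for the short latencies measured here.
 */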
static unsigned long plat_idx_to_val(unsigned int idx)
	unsigned int error_bits;
	unsigned long k, base;

	assert(idx < PLAT_NR);

	/* MSB <= (PLAT_BITS - 1), cannot be rounded off. Use
	 * all bits of the sample as index */
	if (idx < (PLAT_VAL << 1))
		return cycles_to_nsec(idx);

	/* Find the group and compute the minimum value of that group */
	error_bits = (idx >> PLAT_BITS) - 1;
	base = ((unsigned long) 1) << (error_bits + PLAT_BITS);

	/* Find its bucket number within the group */

	/* Return the mean of the range of the bucket */
	return cycles_to_nsec(base + ((k + 0.5) * (1 << error_bits)));
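/*
 * Example with PLAT_BITS = 6: idx = 200 gives error_bits = (200 >> 6) - 1 = 2,
 * base = 1 << 8 = 256, bucket k = 200 % 64 = 8, so the returned midpoint is
 * 256 + 8.5 * 4 = 290 cycles, converted to nsec.
 */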
unsigned int calculate_clat_percentiles(unsigned long *io_u_plat,
					unsigned long nr, unsigned long **output,
					unsigned long *maxv, unsigned long *minv)
	unsigned long sum = 0;
	unsigned int len = plist_len, i, j = 0;
	unsigned long *ovals = NULL;

	ovals = malloc(len * sizeof(*ovals));

	/*
	 * Calculate bucket values, note down max and min values
	 */
	for (i = 0; i < PLAT_NR && !is_last; i++) {
		while (sum >= ((long double) plist[j] / 100.0 * nr)) {
			assert(plist[j] <= 100.0);

			ovals[j] = plat_idx_to_val(i);
			if (ovals[j] < *minv)
			if (ovals[j] > *maxv)

			is_last = (j == len - 1) != 0;

	fprintf(stderr, "error calculating latency percentiles\n");
static void show_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
				  unsigned int precision)
	unsigned int divisor, len, i, j = 0;
	unsigned long minv, maxv;
	unsigned long *ovals;
	int per_line, scale_down, time_width;

	len = calculate_clat_percentiles(io_u_plat, nr, &ovals, &maxv, &minv);

		printf(" percentiles (tsc ticks):\n |");
	} else if (minv > 2000 && maxv > 99999) {
		printf(" percentiles (usec):\n |");
		printf(" percentiles (nsec):\n |");

	time_width = max(5, (int) (log10(maxv / divisor) + 1));
	snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
		 precision, time_width);
	/* fmt will be something like " %5.2fth=[%4llu]%c" */
	per_line = (80 - 7) / (precision + 10 + time_width);
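	/*
	 * Entries per line assume an 80-column terminal: 7 columns of leading
	 * decoration, then roughly precision + 10 + time_width columns for
	 * each "NN.NNth=[value]," entry.
	 */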
	for (j = 0; j < len; j++) {
		if (j != 0 && (j % per_line) == 0)

		/* end of the list */
		is_last = (j == len - 1) != 0;

		for (i = 0; i < scale_down; i++)
			ovals[j] = (ovals[j] + 999) / 1000;

		printf(fmt, plist[j], ovals[j], is_last ? '\n' : ',');

		if ((j % per_line) == per_line - 1)	/* for formatting */
#ifdef ARCH_HAVE_CPU_CLOCK
static unsigned int plat_val_to_idx(unsigned long val)
	unsigned int msb, error_bits, base, offset, idx;

	/* Find MSB starting from bit 0 */
		msb = (sizeof(val) * 8) - __builtin_clzll(val) - 1;

	/*
	 * MSB <= (PLAT_BITS - 1), cannot be rounded off. Use
	 * all bits of the sample as index.
	 */
	if (msb <= PLAT_BITS)

	/* Compute the number of error bits to discard */
	error_bits = msb - PLAT_BITS;

	/* Compute the number of buckets before the group */
	base = (error_bits + 1) << PLAT_BITS;

	/*
	 * Discard the error bits and apply the mask to find the
	 * index for the buckets in the group.
	 */
	offset = (PLAT_VAL - 1) & (val >> error_bits);

	/* Make sure the index does not exceed (array size - 1) */
	idx = (base + offset) < (PLAT_NR - 1) ?
		(base + offset) : (PLAT_NR - 1);
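	/*
	 * Example (PLAT_BITS = 6): val = 290 has msb = 8, so error_bits = 2,
	 * base = (2 + 1) << 6 = 192, offset = (290 >> 2) & 63 = 8, giving
	 * idx = 200, the inverse of the plat_idx_to_val() example above.
	 */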
static void add_stat(struct submitter *s, int clock_index, int nr)
#ifdef ARCH_HAVE_CPU_CLOCK
	unsigned long cycles;

	if (!s->finish && clock_index) {
		cycles = get_cpu_clock();
		cycles -= s->clock_batch[clock_index];
		pidx = plat_val_to_idx(cycles);
static int io_uring_map_buffers(struct submitter *s)
	struct io_uring_map_buffers map = {
		.fd	= s->files[0].real_fd,

		fprintf(stdout, "Mapping buffers may not work with multiple files\n");

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_MAP_BUFFERS, &map, 1);
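/*
 * Note: IORING_REGISTER_MAP_BUFFERS (the -D/dma_map option) appears to rely
 * on an out-of-tree kernel patch; on mainline kernels this register opcode
 * can be expected to fail with EINVAL.
 */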
static int io_uring_register_buffers(struct submitter *s)
	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_BUFFERS, s->iovecs, roundup_pow2(depth));

static int io_uring_register_files(struct submitter *s)
	s->fds = calloc(s->nr_files, sizeof(__s32));
	for (i = 0; i < s->nr_files; i++) {
		s->fds[i] = s->files[i].real_fd;
		s->files[i].fixed_fd = i;

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_FILES, s->fds, s->nr_files);

static int io_uring_setup(unsigned entries, struct io_uring_params *p)
	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 */
	p->flags |= IORING_SETUP_CQSIZE;
	p->cq_entries = entries;

	p->flags |= IORING_SETUP_COOP_TASKRUN;
	p->flags |= IORING_SETUP_SINGLE_ISSUER;
	p->flags |= IORING_SETUP_DEFER_TASKRUN;

	ret = syscall(__NR_io_uring_setup, entries, p);
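	/*
	 * Older kernels reject unknown setup flags with EINVAL, so strip the
	 * optional flags one at a time and retry until setup succeeds or only
	 * the required flags remain.
	 */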
	if (errno == EINVAL && p->flags & IORING_SETUP_COOP_TASKRUN) {
		p->flags &= ~IORING_SETUP_COOP_TASKRUN;
	if (errno == EINVAL && p->flags & IORING_SETUP_SINGLE_ISSUER) {
		p->flags &= ~IORING_SETUP_SINGLE_ISSUER;
	if (errno == EINVAL && p->flags & IORING_SETUP_DEFER_TASKRUN) {
		p->flags &= ~IORING_SETUP_DEFER_TASKRUN;
static void io_uring_probe(int fd)
	struct io_uring_probe *p;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256);

	if (IORING_OP_READ > p->ops_len)

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))

static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
		flags |= IORING_ENTER_REGISTERED_RING;
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
			min_complete, flags, NULL, 0);
	return syscall(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
			min_complete, flags, NULL, 0);

static unsigned file_depth(struct submitter *s)
	return (depth + s->nr_files - 1) / s->nr_files;
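/*
 * file_depth() example: depth = 128 spread across 3 files gives
 * ceil(128 / 3) = 43 inflight IOs allowed per file.
 */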
static unsigned long long get_offset(struct submitter *s, struct file *f)
	unsigned long long offset;

		r = __rand64(&s->rand_state);
		offset = (r % (f->max_blocks - 1)) * bs;

		if (f->cur_off + bs > f->max_size)

static void init_io(struct submitter *s, unsigned index)
	struct io_uring_sqe *sqe = &s->sqes[index];

		sqe->opcode = IORING_OP_NOP;

	if (s->nr_files == 1) {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			if (s->cur_file == s->nr_files)
			f = &s->files[s->cur_file];

	if (register_files) {
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->fixed_fd;
		sqe->fd = f->real_fd;

		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->buf_index = index;
	} else if (!vectored) {
		sqe->opcode = IORING_OP_READ;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->opcode = IORING_OP_READV;
		sqe->addr = (unsigned long) &s->iovecs[index];

	sqe->off = get_offset(s, f);
	sqe->user_data = (unsigned long) f->fileno;
	if (stats && stats_running)
		sqe->user_data |= ((uint64_t) s->clock_index << 32);
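/*
 * user_data packs two values: the low 32 bits carry the fileno, used to find
 * the struct file again on completion, and the high 32 bits carry the
 * clock_batch index of the submission timestamp when stats are enabled.
 * The reap paths split the two halves back out with shifts and masks.
 */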
static void init_io_pt(struct submitter *s, unsigned index)
	struct io_uring_sqe *sqe = &s->sqes[index << 1];
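	/*
	 * Passthrough rings are created with IORING_SETUP_SQE128, so each SQE
	 * is 128 bytes, i.e. two regular entries; index << 1 addresses the
	 * index-th big SQE in the array mapped by setup_ring().
	 */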
	unsigned long offset;
	struct nvme_uring_cmd *cmd;
	unsigned long long slba;
	unsigned long long nlb;

	if (s->nr_files == 1) {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			if (s->cur_file == s->nr_files)
			f = &s->files[s->cur_file];

		r = __rand64(&s->rand_state);
		offset = (r % (f->max_blocks - 1)) * bs;

		if (f->cur_off + bs > f->max_size)

	if (register_files) {
		sqe->fd = f->fixed_fd;
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->real_fd;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) f->fileno;
		sqe->user_data |= ((__u64) s->clock_index << 32ULL);
	sqe->cmd_op = NVME_URING_CMD_IO;
	slba = offset >> f->lba_shift;
	nlb = (bs >> f->lba_shift) - 1;
	cmd = (struct nvme_uring_cmd *)&sqe->cmd;

	/* cdw10 and cdw11 represent the starting slba */
	cmd->cdw10 = slba & 0xffffffff;
	cmd->cdw11 = slba >> 32;
	/* cdw12 represents the (zero-based) number of LBAs to read */
	cmd->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = index;
static int prep_more_ios_uring(struct submitter *s, int max_ios)
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned head, index, tail, next_tail, prepped = 0;

	head = atomic_load_acquire(ring->head);
		next_tail = tail = *ring->tail;

		if (next_tail == head)

		index = tail & sq_ring_mask;
			init_io_pt(s, index);
	} while (prepped < max_ios);

	atomic_store_release(ring->tail, tail);
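/*
 * SQ ring protocol: the kernel consumes entries at head, the app produces at
 * tail. The head is loaded with acquire semantics before filling SQEs, and
 * the new tail is published with a release store so the kernel can never
 * observe a tail that precedes the SQE writes. reap_events_uring() mirrors
 * this on the CQ ring with an acquire load of the tail and a release store
 * of the head.
 */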
static int get_file_size(struct file *f)
	if (fstat(f->real_fd, &st) < 0)

		if (!S_ISCHR(st.st_mode)) {
			fprintf(stderr, "passthrough works only with nvme-ns "
					"generic devices (/dev/ngXnY)\n");
		ret = nvme_get_info(f->real_fd, &f->nsid, &lbs, &nlba);
		if ((bs % lbs) != 0) {
			printf("error: bs:%d should be a multiple of logical_block_size:%d\n",
		f->max_blocks = nlba / bs;
		f->lba_shift = ilog2(lbs);
	} else if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
		f->max_blocks = bytes / bs;
	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / bs;
		f->max_size = st.st_size;

static int reap_events_uring(struct submitter *s)
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;
	int last_idx = -1, stat_nr = 0;

		if (head == atomic_load_acquire(ring->tail))
		cqe = &ring->cqes[head & cq_ring_mask];
			int fileno = cqe->user_data & 0xffffffff;

			f = &s->files[fileno];
			if (cqe->res != bs) {
				printf("io: unexpected ret=%d\n", cqe->res);
				if (polled && cqe->res == -EOPNOTSUPP)
					printf("Your filesystem/driver/kernel doesn't support polled IO\n");
			int clock_index = cqe->user_data >> 32;

			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
				last_idx = clock_index;
		add_stat(s, last_idx, stat_nr);

	s->inflight -= reaped;
	atomic_store_release(ring->head, head);
static int reap_events_uring_pt(struct submitter *s)
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;
	int last_idx = -1, stat_nr = 0;

		if (head == atomic_load_acquire(ring->tail))
		index = head & cq_ring_mask;
		cqe = &ring->cqes[index << 1];
		fileno = cqe->user_data & 0xffffffff;
		f = &s->files[fileno];

			printf("io: unexpected ret=%d\n", cqe->res);
			if (polled && cqe->res == -EINVAL)
				printf("passthrough doesn't support polled IO\n");
			int clock_index = cqe->user_data >> 32;

			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
				last_idx = clock_index;
		add_stat(s, last_idx, stat_nr);

	s->inflight -= reaped;
	atomic_store_release(ring->head, head);

static void set_affinity(struct submitter *s)
#ifdef CONFIG_LIBNUMA
	struct bitmask *mask;

	if (s->numa_node == -1)

	numa_set_preferred(s->numa_node);

	mask = numa_allocate_cpumask();
	numa_node_to_cpus(s->numa_node, mask);
	numa_sched_setaffinity(s->tid, mask);

static int detect_node(struct submitter *s, const char *name)
#ifdef CONFIG_LIBNUMA
	const char *base = basename(name);

		sprintf(str, "/sys/class/nvme-generic/%s/device/numa_node", base);
		sprintf(str, "/sys/block/%s/device/numa_node", base);
	fd = open(str, O_RDONLY);

	ret = read(fd, str, sizeof(str));
static int setup_aio(struct submitter *s)
		fprintf(stderr, "aio does not support polled IO\n");
	if (sq_thread_poll) {
		fprintf(stderr, "aio does not support SQPOLL IO\n");
		fprintf(stderr, "aio does not support nop IO\n");
	if (fixedbufs || register_files) {
		fprintf(stderr, "aio does not support registered files or buffers\n");
		fixedbufs = register_files = 0;

	return io_queue_init(roundup_pow2(depth), &s->aio_ctx);

	fprintf(stderr, "Legacy AIO not available on this system/build\n");
static int setup_ring(struct submitter *s)
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));

	if (polled && !do_nop)
		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;

		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;

	fd = io_uring_setup(depth, &p);
		perror("io_uring_setup");

	s->ring_fd = s->enter_ring_fd = fd;

		rlim.rlim_cur = RLIM_INFINITY;
		rlim.rlim_max = RLIM_INFINITY;
		/* ignore potential error, not needed on newer kernels */
		setrlimit(RLIMIT_MEMLOCK, &rlim);

		ret = io_uring_register_buffers(s);
			perror("io_uring_register_buffers");

			ret = io_uring_map_buffers(s);
				perror("io_uring_map_buffers");

	if (register_files) {
		ret = io_uring_register_files(s);
			perror("io_uring_register_files");

	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	if (p.flags & IORING_SETUP_SQE128)
		len = 2 * p.sq_entries * sizeof(struct io_uring_sqe);
	len = p.sq_entries * sizeof(struct io_uring_sqe);
	s->sqes = mmap(0, len,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,

	if (p.flags & IORING_SETUP_CQE32) {
		len = p.cq_off.cqes +
			2 * p.cq_entries * sizeof(struct io_uring_cqe);
		len = p.cq_off.cqes +
			p.cq_entries * sizeof(struct io_uring_cqe);
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;

	for (i = 0; i < p.sq_entries; i++)
		sring->array[i] = i;
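	/*
	 * The SQ index array is identity-mapped once here; since submissions
	 * always fill slot (tail & sq_ring_mask), the array never needs to be
	 * rewritten in the submission path.
	 */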
static void *allocate_mem(struct submitter *s, int size)
#ifdef CONFIG_LIBNUMA
	if (s->numa_node != -1)
		return numa_alloc_onnode(size, s->numa_node);

	if (posix_memalign(&buf, t_io_uring_page_size, size)) {
		printf("failed alloc\n");
static int submitter_init(struct submitter *s)
	int i, nr_batch, err;
	static int init_printed;

	printf("submitter=%d, tid=%d, file=%s, node=%d\n", s->index, s->tid,
		s->filename, s->numa_node);

	__init_rand64(&s->rand_state, s->tid);

	for (i = 0; i < MAX_FDS; i++)
		s->files[i].fileno = i;

	for (i = 0; i < roundup_pow2(depth); i++) {
		buf = allocate_mem(s, bs);
		s->iovecs[i].iov_base = buf;
		s->iovecs[i].iov_len = bs;

		sprintf(buf, "Engine=preadv2\n");
		err = setup_ring(s);
		sprintf(buf, "Engine=io_uring, sq_ring=%d, cq_ring=%d\n",
			*s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
		sprintf(buf, "Engine=aio\n");

		printf("queue setup failed: %s, %d\n", strerror(errno), err);

	if (!init_printed) {
		printf("polled=%d, fixedbufs=%d/%d, register_files=%d, buffered=%d, QD=%d\n",
			polled, fixedbufs, dma_map, register_files, buffered, depth);

	nr_batch = roundup_pow2(depth / batch_submit);
		s->clock_batch = calloc(nr_batch, sizeof(unsigned long));
		s->plat = calloc(PLAT_NR, sizeof(unsigned long));
		s->clock_batch = NULL;

	/*
	 * Perform the expensive command initialization part for passthrough
	 * here rather than in the fast path.
	 */
	for (i = 0; i < roundup_pow2(depth); i++) {
		struct io_uring_sqe *sqe = &s->sqes[i << 1];

		memset(&sqe->cmd, 0, sizeof(struct nvme_uring_cmd));
#ifdef CONFIG_LIBAIO
static int prep_more_ios_aio(struct submitter *s, int max_ios, struct iocb *iocbs)
	while (index < max_ios) {
		struct iocb *iocb = &iocbs[index];

		if (s->nr_files == 1) {
			f = &s->files[s->cur_file];
			if (f->pending_ios >= file_depth(s)) {
				if (s->cur_file == s->nr_files)
				f = &s->files[s->cur_file];

		io_prep_pread(iocb, f->real_fd, s->iovecs[index].iov_base,
				s->iovecs[index].iov_len, get_offset(s, f));

		if (stats && stats_running)
			data |= (((uint64_t) s->clock_index) << 32);
		iocb->data = (void *) (uintptr_t) data;

static int reap_events_aio(struct submitter *s, struct io_event *events, int evs)
	int last_idx = -1, stat_nr = 0;

		uint64_t data = (uintptr_t) events[reaped].data;
		struct file *f = &s->files[data & 0xffffffff];

		if (events[reaped].res != bs) {
			printf("io: unexpected ret=%ld\n", events[reaped].res);
			int clock_index = data >> 32;

			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
				last_idx = clock_index;
		add_stat(s, last_idx, stat_nr);

	s->inflight -= reaped;

static void *submitter_aio_fn(void *data)
	struct submitter *s = data;
	int i, ret, prepped;
	struct iocb **iocbsptr;
	struct io_event *events;
#ifdef ARCH_HAVE_CPU_CLOCK
	int nr_batch = submitter_init(s);

	iocbsptr = calloc(depth, sizeof(struct iocb *));
	iocbs = calloc(depth, sizeof(struct iocb));
	events = calloc(depth, sizeof(struct io_event));

	for (i = 0; i < depth; i++)
		iocbsptr[i] = &iocbs[i];

		int to_wait, to_submit, to_prep;

		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios_aio(s, to_prep, iocbs);
#ifdef ARCH_HAVE_CPU_CLOCK
			if (prepped && stats) {
				s->clock_batch[s->clock_index] = get_cpu_clock();
				s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
		s->inflight += prepped;
		to_submit = prepped;

		if (to_submit && (s->inflight + to_submit <= depth))
		to_wait = min(s->inflight + to_submit, batch_complete);

		ret = io_submit(s->aio_ctx, to_submit, iocbsptr);
			perror("io_submit");
		} else if (ret != to_submit) {
			printf("submitted %d, wanted %d\n", ret, to_submit);

			r = io_getevents(s->aio_ctx, to_wait, to_wait, events, NULL);
				perror("io_getevents");
			} else if (r != to_wait) {
				printf("r=%d, wait=%d\n", r, to_wait);
			r = reap_events_aio(s, events, r);
	} while (!s->finish);
static void io_uring_unregister_ring(struct submitter *s)
	struct io_uring_rsrc_update up = {
		.offset	= s->enter_ring_fd,

	syscall(__NR_io_uring_register, s->ring_fd, IORING_UNREGISTER_RING_FDS,

static int io_uring_register_ring(struct submitter *s)
	struct io_uring_rsrc_update up = {

	ret = syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_RING_FDS, &up, 1);
		s->enter_ring_fd = up.offset;
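/*
 * IORING_REGISTER_RING_FDS hands back an offset into the task's registered
 * ring fd table; passing that offset to io_uring_enter() together with
 * IORING_ENTER_REGISTERED_RING skips the per-syscall fdget/fdput overhead.
 */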
static void *submitter_uring_fn(void *data)
	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;
#ifdef ARCH_HAVE_CPU_CLOCK
	int nr_batch = submitter_init(s);

	io_uring_register_ring(s);

		int to_wait, to_submit, this_reap, to_prep;
		unsigned ring_flags = 0;

		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios_uring(s, to_prep);
#ifdef ARCH_HAVE_CPU_CLOCK
			if (prepped && stats) {
				s->clock_batch[s->clock_index] = get_cpu_clock();
				s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
		s->inflight += prepped;

		to_submit = prepped;

		if (to_submit && (s->inflight + to_submit <= depth))
		to_wait = min(s->inflight + to_submit, batch_complete);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		ring_flags = atomic_load_acquire(ring->flags);
		if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
			flags = IORING_ENTER_GETEVENTS;
			if (ring_flags & IORING_SQ_NEED_WAKEUP)
				flags |= IORING_ENTER_SQ_WAKEUP;
			ret = io_uring_enter(s, to_submit, to_wait, flags);

		/* for SQPOLL, we submitted it all effectively */

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */
				r = reap_events_uring_pt(s);
				r = reap_events_uring(s);
		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

		} else if (ret < to_submit) {
			int diff = to_submit - ret;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
			printf("io_submit: %s\n", strerror(errno));
	} while (!s->finish);
	io_uring_unregister_ring(s);

#ifdef CONFIG_PWRITEV2
static void *submitter_sync_fn(void *data)
	struct submitter *s = data;

		if (s->nr_files == 1) {
			f = &s->files[s->cur_file];
			if (f->pending_ios >= file_depth(s)) {
				if (s->cur_file == s->nr_files)
				f = &s->files[s->cur_file];

#ifdef ARCH_HAVE_CPU_CLOCK
			s->clock_batch[s->clock_index] = get_cpu_clock();

		offset = get_offset(s, f);

			ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, RWF_HIPRI);
			ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, 0);

		} else if (ret != bs) {

			add_stat(s, s->clock_index, 1);
	} while (!s->finish);
static void *submitter_sync_fn(void *data)

static struct submitter *get_submitter(int offset)
	/* stride must match the per-submitter allocation size in main() */
	ret += offset * (sizeof(*submitter) + roundup_pow2(depth) * sizeof(struct iovec));
static void do_finish(const char *reason)
	printf("Exiting on %s\n", reason);
	for (j = 0; j < nthreads; j++) {
		struct submitter *s = get_submitter(j);

	if (max_iops > 1000000) {
		double miops = (double) max_iops / 1000000.0;
		printf("Maximum IOPS=%.2fM\n", miops);
	} else if (max_iops > 100000) {
		double kiops = (double) max_iops / 1000.0;
		printf("Maximum IOPS=%.2fK\n", kiops);
		printf("Maximum IOPS=%lu\n", max_iops);

static void sig_int(int sig)
	do_finish("signal");

static void arm_sig_int(void)
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);

	/* Windows uses SIGBREAK as a quit signal from other applications */
	sigaction(SIGBREAK, &act, NULL);
static void usage(char *argv, int status)
	char runtime_str[16];

	snprintf(runtime_str, sizeof(runtime_str), "%d", runtime);
	printf("%s [options] -- [filenames]\n"
		" -d <int>  : IO Depth, default %d\n"
		" -s <int>  : Batch submit, default %d\n"
		" -c <int>  : Batch complete, default %d\n"
		" -b <int>  : Block size, default %d\n"
		" -p <bool> : Polled IO, default %d\n"
		" -B <bool> : Fixed buffers, default %d\n"
		" -D <bool> : DMA map fixed buffers, default %d\n"
		" -F <bool> : Register files, default %d\n"
		" -n <int>  : Number of threads, default %d\n"
		" -O <bool> : Use O_DIRECT, default %d\n"
		" -N <bool> : Perform just no-op requests, default %d\n"
		" -t <bool> : Track IO latencies, default %d\n"
		" -T <int>  : TSC rate in Hz\n"
		" -r <int>  : Runtime in seconds, default %s\n"
		" -R <bool> : Use random IO, default %d\n"
		" -a <bool> : Use legacy aio, default %d\n"
		" -S <bool> : Use sync IO (preadv2), default %d\n"
		" -X <bool> : Use registered ring, default %d\n"
		" -P <bool> : Automatically place on device home node, default %d\n"
		" -u <bool> : Use nvme-passthrough I/O, default %d\n",
		argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
		fixedbufs, dma_map, register_files, nthreads, !buffered, do_nop,
		stats, runtime == 0 ? "unlimited" : runtime_str, random_io, aio,
		use_sync, register_ring, numa_placement, pt);
static void read_tsc_rate(void)
	fd = open(TSC_RATE_FILE, O_RDONLY);

	ret = read(fd, buffer, sizeof(buffer));

	tsc_rate = strtoul(buffer, NULL, 10);
	printf("Using TSC rate %luHz\n", tsc_rate);

static void write_tsc_rate(void)
	if (!stat(TSC_RATE_FILE, &sb))

	fd = open(TSC_RATE_FILE, O_WRONLY | O_CREAT, 0644);

	memset(buffer, 0, sizeof(buffer));
	sprintf(buffer, "%lu", tsc_rate);
	ret = write(fd, buffer, strlen(buffer));
int main(int argc, char *argv[])
	struct submitter *s;
	unsigned long done, calls, reap;
	int i, j, flags, fd, opt, threads_per_f, threads_rem = 0, nfiles;

	if (!do_nop && argc < 2)

	while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:t:T:a:r:D:R:X:S:P:u:h?")) != -1) {
			aio = !!atoi(optarg);
			depth = atoi(optarg);
			batch_submit = atoi(optarg);
			batch_complete = atoi(optarg);
			if (!batch_complete)
			polled = !!atoi(optarg);
			fixedbufs = !!atoi(optarg);
			register_files = !!atoi(optarg);
			nthreads = atoi(optarg);
				printf("Threads must be non-zero\n");
			do_nop = !!atoi(optarg);
			buffered = !atoi(optarg);
#ifndef ARCH_HAVE_CPU_CLOCK
			fprintf(stderr, "Stats not supported on this CPU\n");
			stats = !!atoi(optarg);
#ifndef ARCH_HAVE_CPU_CLOCK
			fprintf(stderr, "Stats not supported on this CPU\n");
			tsc_rate = strtoul(optarg, NULL, 10);
			runtime = atoi(optarg);
			dma_map = !!atoi(optarg);
			random_io = !!atoi(optarg);
			register_ring = !!atoi(optarg);
#ifdef CONFIG_PWRITEV2
			use_sync = !!atoi(optarg);
			fprintf(stderr, "preadv2 not supported\n");
			numa_placement = !!atoi(optarg);
			pt = !!atoi(optarg);

	if (batch_complete > depth)
		batch_complete = depth;
	if (batch_submit > depth)
		batch_submit = depth;
	if (!fixedbufs && dma_map)

	submitter = calloc(nthreads, sizeof(*submitter) +
				roundup_pow2(depth) * sizeof(struct iovec));
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		s->done = s->calls = s->reaps = 0;

	flags = O_RDONLY | O_NOATIME;

		printf("No files specified\n");

	threads_per_f = nthreads / nfiles;
	/* make sure each thread gets assigned files */
	if (threads_per_f == 0) {

	threads_rem = nthreads - threads_per_f * nfiles;

	while (!do_nop && i < argc) {
		memset(&f, 0, sizeof(f));

		fd = open(argv[i], flags);
		if (get_file_size(&f)) {
			printf("failed getting size of device/file\n");
		if (f.max_blocks <= 1) {
			printf("Zero file/device size?\n");

		limit = threads_per_f;
		limit += threads_rem > 0 ? 1 : 0;
		for (k = 0; k < limit; k++) {
			s = get_submitter((j + k) % nthreads);

			if (s->nr_files == MAX_FDS) {
				printf("Max number of files (%d) reached\n", MAX_FDS);

			memcpy(&s->files[s->nr_files], &f, sizeof(f));

				detect_node(s, argv[i]);
			s->filename = argv[i];
	t_io_uring_page_size = sysconf(_SC_PAGESIZE);
	if (t_io_uring_page_size < 0)
		t_io_uring_page_size = 4096;

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
			pthread_create(&s->thread, NULL, submitter_sync_fn, s);
			pthread_create(&s->thread, NULL, submitter_uring_fn, s);
#ifdef CONFIG_LIBAIO
			pthread_create(&s->thread, NULL, submitter_aio_fn, s);
	reap = calls = done = 0;
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long rpc = 0, ipc = 0;
		unsigned long iops, bw;

		if (runtime && !--runtime)
			do_finish("timeout");

		/* don't print partial run, if interrupted by signal */

		/* one second into the run, enable stats */

		for (j = 0; j < nthreads; j++) {
			s = get_submitter(j);
			this_done += s->done;
			this_call += s->calls;
			this_reap += s->reaps;
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);

		iops = this_done - done;
			bw = iops * (bs / 1048576);
			bw = iops / (1048576 / bs);
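		/*
		 * bw is in MiB/sec: e.g. with bs = 4096, bw = iops / 256,
		 * since 1048576 / 4096 = 256; the two branches just avoid
		 * integer truncation for block sizes above or below 1 MiB.
		 */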
		if (iops > 1000000) {
			double miops = (double) iops / 1000000.0;
			printf("IOPS=%.2fM, ", miops);
		} else if (iops > 100000) {
			double kiops = (double) iops / 1000.0;
			printf("IOPS=%.2fK, ", kiops);
			printf("IOPS=%lu, ", iops);

		max_iops = max(max_iops, iops);

			double bw_g = (double) bw / 1000.0;
			printf("BW=%.2fGiB/s, ", bw_g);
			printf("BW=%luMiB/s, ", bw);

		printf("IOS/call=%ld/%ld\n", rpc, ipc);

	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_join(s->thread, &ret);

			printf("%d: Latency percentiles:\n", s->tid);
			for (i = 0, nr = 0; i < PLAT_NR; i++)
			show_clat_percentiles(s->plat, nr, 4);
			free(s->clock_batch);