t/io_uring: minor optimizations to IO init fast path
[fio.git] / t / io_uring.c
1 #include <stdio.h>
2 #include <errno.h>
3 #include <assert.h>
4 #include <stdlib.h>
5 #include <stddef.h>
6 #include <signal.h>
7 #include <inttypes.h>
8 #include <math.h>
9
10 #ifdef CONFIG_LIBAIO
11 #include <libaio.h>
12 #endif
13
14 #ifdef CONFIG_LIBNUMA
15 #include <numa.h>
16 #endif
17
18 #include <sys/types.h>
19 #include <sys/stat.h>
20 #include <sys/ioctl.h>
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
23 #include <sys/mman.h>
24 #include <sys/uio.h>
25 #include <linux/fs.h>
26 #include <fcntl.h>
27 #include <unistd.h>
28 #include <string.h>
29 #include <pthread.h>
30 #include <sched.h>
31
32 #include "../arch/arch.h"
33 #include "../os/os.h"
34 #include "../lib/types.h"
35 #include "../lib/roundup.h"
36 #include "../lib/rand.h"
37 #include "../minmax.h"
38 #include "../os/linux/io_uring.h"
39 #include "../engines/nvme.h"
40
41 struct io_sq_ring {
42         unsigned *head;
43         unsigned *tail;
44         unsigned *ring_mask;
45         unsigned *ring_entries;
46         unsigned *flags;
47         unsigned *array;
48 };
49
50 struct io_cq_ring {
51         unsigned *head;
52         unsigned *tail;
53         unsigned *ring_mask;
54         unsigned *ring_entries;
55         struct io_uring_cqe *cqes;
56 };
57
58 #define DEPTH                   128
59 #define BATCH_SUBMIT            32
60 #define BATCH_COMPLETE          32
61 #define BS                      4096
62
63 #define MAX_FDS                 16
64
65 static unsigned sq_ring_mask, cq_ring_mask;
66
67 struct file {
68         unsigned long max_blocks;
69         unsigned long max_size;
70         unsigned long cur_off;
71         unsigned pending_ios;
72         unsigned int nsid;      /* nsid field required for nvme-passthrough */
73         unsigned int lba_shift; /* lba_shift field required for nvme-passthrough */
74         int real_fd;
75         int fixed_fd;
76         int fileno;
77 };
78
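/*
 * Log-linear latency histogram, using the same bucketing scheme as fio's
 * completion latency percentiles: each group holds PLAT_VAL linearly spaced
 * buckets, and successive groups double in width, for PLAT_NR buckets total.
 */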
79 #define PLAT_BITS               6
80 #define PLAT_VAL                (1 << PLAT_BITS)
81 #define PLAT_GROUP_NR           29
82 #define PLAT_NR                 (PLAT_GROUP_NR * PLAT_VAL)
83
84 struct submitter {
85         pthread_t thread;
86         int ring_fd;
87         int enter_ring_fd;
88         int index;
89         struct io_sq_ring sq_ring;
90         struct io_uring_sqe *sqes;
91         struct io_cq_ring cq_ring;
92         int inflight;
93         int tid;
94         unsigned long reaps;
95         unsigned long done;
96         unsigned long calls;
97         volatile int finish;
98
99         __s32 *fds;
100
101         struct taus258_state rand_state;
102
103         unsigned long *clock_batch;
104         int clock_index;
105         unsigned long *plat;
106
107 #ifdef CONFIG_LIBAIO
108         io_context_t aio_ctx;
109 #endif
110
111         int numa_node;
112         const char *filename;
113
114         struct file files[MAX_FDS];
115         unsigned nr_files;
116         unsigned cur_file;
117         struct iovec iovecs[];
118 };
119
120 static struct submitter *submitter;
121 static volatile int finish;
122 static int stats_running;
123 static unsigned long max_iops;
124 static long t_io_uring_page_size;
125
126 static int depth = DEPTH;
127 static int batch_submit = BATCH_SUBMIT;
128 static int batch_complete = BATCH_COMPLETE;
129 static int bs = BS;
130 static int polled = 1;          /* use IO polling */
131 static int fixedbufs = 1;       /* use fixed user buffers */
132 static int dma_map;             /* pre-map DMA buffers */
133 static int register_files = 1;  /* use fixed files */
134 static int buffered = 0;        /* use buffered IO, not O_DIRECT */
135 static int sq_thread_poll = 0;  /* use kernel submission/poller thread */
136 static int sq_thread_cpu = -1;  /* pin above thread to this CPU */
137 static int do_nop = 0;          /* no-op SQ ring commands */
138 static int nthreads = 1;
139 static int stats = 0;           /* generate IO stats */
140 static int aio = 0;             /* use libaio */
141 static int runtime = 0;         /* runtime */
142 static int random_io = 1;       /* random or sequential IO */
143 static int register_ring = 1;   /* register ring */
144 static int use_sync = 0;        /* use preadv2 */
145 static int numa_placement = 0;  /* set to node of device */
146 static int pt = 0;              /* passthrough I/O or not */
147
148 static unsigned long tsc_rate;
149
150 #define TSC_RATE_FILE   "tsc-rate"
151
152 static int vectored = 1;
153
154 static float plist[] = { 1.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0,
155                         80.0, 90.0, 95.0, 99.0, 99.5, 99.9, 99.95, 99.99 };
156 static int plist_len = 17;
157
158 #ifndef IORING_REGISTER_MAP_BUFFERS
159 #define IORING_REGISTER_MAP_BUFFERS     22
160 struct io_uring_map_buffers {
161         __s32   fd;
162         __u32   buf_start;
163         __u32   buf_end;
164         __u32   flags;
165         __u64   rsvd[2];
166 };
167 #endif
168
169 static int nvme_identify(int fd, __u32 nsid, enum nvme_identify_cns cns,
170                          enum nvme_csi csi, void *data)
171 {
172         struct nvme_passthru_cmd cmd = {
173                 .opcode         = nvme_admin_identify,
174                 .nsid           = nsid,
175                 .addr           = (__u64)(uintptr_t)data,
176                 .data_len       = NVME_IDENTIFY_DATA_SIZE,
177                 .cdw10          = cns,
178                 .cdw11          = csi << NVME_IDENTIFY_CSI_SHIFT,
179                 .timeout_ms     = NVME_DEFAULT_IOCTL_TIMEOUT,
180         };
181
182         return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
183 }
184
185 static int nvme_get_info(int fd, __u32 *nsid, __u32 *lba_sz, __u64 *nlba)
186 {
187         struct nvme_id_ns ns;
188         int namespace_id;
189         int err;
190
191         namespace_id = ioctl(fd, NVME_IOCTL_ID);
192         if (namespace_id < 0) {
193                 fprintf(stderr, "error: failed to fetch namespace-id\n");
194                 close(fd);
195                 return -errno;
196         }
197
198         /*
199          * Identify namespace to get namespace-id, namespace size in LBAs
200          * and LBA data size.
201          */
202         err = nvme_identify(fd, namespace_id, NVME_IDENTIFY_CNS_NS,
203                                 NVME_CSI_NVM, &ns);
204         if (err) {
205                 fprintf(stderr, "error: failed to identify namespace\n");
206                 close(fd);
207                 return err;
208         }
209
210         *nsid = namespace_id;
211         *lba_sz = 1 << ns.lbaf[(ns.flbas & 0x0f)].ds;
212         *nlba = ns.nsze;
213
214         return 0;
215 }
216
217 static unsigned long cycles_to_nsec(unsigned long cycles)
218 {
219         uint64_t val;
220
221         if (!tsc_rate)
222                 return cycles;
223
224         val = cycles * 1000000000ULL;
225         return val / tsc_rate;
226 }
227
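/*
 * Convert a histogram bucket index back to a latency value (the bucket
 * midpoint), reported in nsec when the TSC rate is known, else in raw cycles.
 */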
228 static unsigned long plat_idx_to_val(unsigned int idx)
229 {
230         unsigned int error_bits;
231         unsigned long k, base;
232
233         assert(idx < PLAT_NR);
234
235         /* MSB <= (PLAT_BITS-1), cannot be rounded off. Use
236          * all bits of the sample as index */
237         if (idx < (PLAT_VAL << 1))
238                 return cycles_to_nsec(idx);
239
240         /* Find the group and compute the minimum value of that group */
241         error_bits = (idx >> PLAT_BITS) - 1;
242         base = ((unsigned long) 1) << (error_bits + PLAT_BITS);
243
244         /* Find its bucket number of the group */
245         k = idx % PLAT_VAL;
246
247         /* Return the mean of the range of the bucket */
248         return cycles_to_nsec(base + ((k + 0.5) * (1 << error_bits)));
249 }
250
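/*
 * Walk the latency histogram and, for each percentile in plist[], record the
 * bucket value at which the cumulative sample count crosses it. The caller
 * owns (and frees) the returned array.
 */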
251 unsigned int calculate_clat_percentiles(unsigned long *io_u_plat,
252                 unsigned long nr, unsigned long **output,
253                 unsigned long *maxv, unsigned long *minv)
254 {
255         unsigned long sum = 0;
256         unsigned int len = plist_len, i, j = 0;
257         unsigned long *ovals = NULL;
258         bool is_last;
259
260         *minv = -1UL;
261         *maxv = 0;
262
263         ovals = malloc(len * sizeof(*ovals));
264         if (!ovals)
265                 return 0;
266
267         /*
268          * Calculate bucket values, note down max and min values
269          */
270         is_last = false;
271         for (i = 0; i < PLAT_NR && !is_last; i++) {
272                 sum += io_u_plat[i];
273                 while (sum >= ((long double) plist[j] / 100.0 * nr)) {
274                         assert(plist[j] <= 100.0);
275
276                         ovals[j] = plat_idx_to_val(i);
277                         if (ovals[j] < *minv)
278                                 *minv = ovals[j];
279                         if (ovals[j] > *maxv)
280                                 *maxv = ovals[j];
281
282                         is_last = (j == len - 1) != 0;
283                         if (is_last)
284                                 break;
285
286                         j++;
287                 }
288         }
289
290         if (!is_last)
291                 fprintf(stderr, "error calculating latency percentiles\n");
292
293         *output = ovals;
294         return len;
295 }
296
297 static void show_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
298                                   unsigned int precision)
299 {
300         unsigned int divisor, len, i, j = 0;
301         unsigned long minv, maxv;
302         unsigned long *ovals;
303         int per_line, scale_down, time_width;
304         bool is_last;
305         char fmt[32];
306
307         len = calculate_clat_percentiles(io_u_plat, nr, &ovals, &maxv, &minv);
308         if (!len || !ovals)
309                 goto out;
310
311         if (!tsc_rate) {
312                 scale_down = 0;
313                 divisor = 1;
314                 printf("    percentiles (tsc ticks):\n     |");
315         } else if (minv > 2000 && maxv > 99999) {
316                 scale_down = 1;
317                 divisor = 1000;
318                 printf("    percentiles (usec):\n     |");
319         } else {
320                 scale_down = 0;
321                 divisor = 1;
322                 printf("    percentiles (nsec):\n     |");
323         }
324
325         time_width = max(5, (int) (log10(maxv / divisor) + 1));
326         snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
327                         precision, time_width);
328         /* fmt will be something like " %5.2fth=[%4llu]%c" */
329         per_line = (80 - 7) / (precision + 10 + time_width);
330
331         for (j = 0; j < len; j++) {
332                 /* for formatting */
333                 if (j != 0 && (j % per_line) == 0)
334                         printf("     |");
335
336                 /* end of the list */
337                 is_last = (j == len - 1) != 0;
338
339                 for (i = 0; i < scale_down; i++)
340                         ovals[j] = (ovals[j] + 999) / 1000;
341
342                 printf(fmt, plist[j], ovals[j], is_last ? '\n' : ',');
343
344                 if (is_last)
345                         break;
346
347                 if ((j % per_line) == per_line - 1)     /* for formatting */
348                         printf("\n");
349         }
350
351 out:
352         free(ovals);
353 }
354
355 #ifdef ARCH_HAVE_CPU_CLOCK
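/*
 * Map a latency sample (in TSC cycles) to its histogram bucket: small values
 * index directly, larger ones drop low-order "error bits" so that each group
 * of PLAT_VAL buckets spans a power-of-two range.
 */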
356 static unsigned int plat_val_to_idx(unsigned long val)
357 {
358         unsigned int msb, error_bits, base, offset, idx;
359
360         /* Find MSB starting from bit 0 */
361         if (val == 0)
362                 msb = 0;
363         else
364                 msb = (sizeof(val)*8) - __builtin_clzll(val) - 1;
365
366         /*
367          * MSB <= (PLAT_BITS-1), cannot be rounded off. Use
368          * all bits of the sample as index
369          */
370         if (msb <= PLAT_BITS)
371                 return val;
372
373         /* Compute the number of error bits to discard */
374         error_bits = msb - PLAT_BITS;
375
376         /* Compute the number of buckets before the group */
377         base = (error_bits + 1) << PLAT_BITS;
378
379         /*
380          * Discard the error bits and apply the mask to find the
381          * index for the buckets in the group
382          */
383         offset = (PLAT_VAL - 1) & (val >> error_bits);
384
385         /* Make sure the index does not exceed (array size - 1) */
386         idx = (base + offset) < (PLAT_NR - 1) ?
387                 (base + offset) : (PLAT_NR - 1);
388
389         return idx;
390 }
391 #endif
392
393 static void add_stat(struct submitter *s, int clock_index, int nr)
394 {
395 #ifdef ARCH_HAVE_CPU_CLOCK
396         unsigned long cycles;
397         unsigned int pidx;
398
399         if (!s->finish && clock_index) {
400                 cycles = get_cpu_clock();
401                 cycles -= s->clock_batch[clock_index];
402                 pidx = plat_val_to_idx(cycles);
403                 s->plat[pidx] += nr;
404         }
405 #endif
406 }
407
408 static int io_uring_map_buffers(struct submitter *s)
409 {
410         struct io_uring_map_buffers map = {
411                 .fd             = s->files[0].real_fd,
412                 .buf_end        = depth,
413         };
414
415         if (do_nop)
416                 return 0;
417         if (s->nr_files > 1)
418                 fprintf(stdout, "Mapping buffers may not work with multiple files\n");
419
420         return syscall(__NR_io_uring_register, s->ring_fd,
421                         IORING_REGISTER_MAP_BUFFERS, &map, 1);
422 }
423
424 static int io_uring_register_buffers(struct submitter *s)
425 {
426         if (do_nop)
427                 return 0;
428
429         return syscall(__NR_io_uring_register, s->ring_fd,
430                         IORING_REGISTER_BUFFERS, s->iovecs, roundup_pow2(depth));
431 }
432
433 static int io_uring_register_files(struct submitter *s)
434 {
435         int i;
436
437         if (do_nop)
438                 return 0;
439
440         s->fds = calloc(s->nr_files, sizeof(__s32));
441         for (i = 0; i < s->nr_files; i++) {
442                 s->fds[i] = s->files[i].real_fd;
443                 s->files[i].fixed_fd = i;
444         }
445
446         return syscall(__NR_io_uring_register, s->ring_fd,
447                         IORING_REGISTER_FILES, s->fds, s->nr_files);
448 }
449
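/*
 * Create the ring. Newer setup flags (COOP_TASKRUN, SINGLE_ISSUER,
 * DEFER_TASKRUN) are requested opportunistically; whichever one an older
 * kernel rejects with EINVAL is dropped before retrying.
 */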
450 static int io_uring_setup(unsigned entries, struct io_uring_params *p)
451 {
452         int ret;
453
454         /*
455          * Clamp CQ ring size at our SQ ring size, we don't need more entries
456          * than that.
457          */
458         p->flags |= IORING_SETUP_CQSIZE;
459         p->cq_entries = entries;
460
461         p->flags |= IORING_SETUP_COOP_TASKRUN;
462         p->flags |= IORING_SETUP_SINGLE_ISSUER;
463         p->flags |= IORING_SETUP_DEFER_TASKRUN;
464 retry:
465         ret = syscall(__NR_io_uring_setup, entries, p);
466         if (!ret)
467                 return 0;
468
469         if (errno == EINVAL && p->flags & IORING_SETUP_COOP_TASKRUN) {
470                 p->flags &= ~IORING_SETUP_COOP_TASKRUN;
471                 goto retry;
472         }
473         if (errno == EINVAL && p->flags & IORING_SETUP_SINGLE_ISSUER) {
474                 p->flags &= ~IORING_SETUP_SINGLE_ISSUER;
475                 goto retry;
476         }
477         if (errno == EINVAL && p->flags & IORING_SETUP_DEFER_TASKRUN) {
478                 p->flags &= ~IORING_SETUP_DEFER_TASKRUN;
479                 goto retry;
480         }
481
482         return ret;
483 }
484
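/*
 * Probe the supported opcodes; if IORING_OP_READ is available, prefer it over
 * vectored IORING_OP_READV.
 */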
485 static void io_uring_probe(int fd)
486 {
487         struct io_uring_probe *p;
488         int ret;
489
490         p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
491         if (!p)
492                 return;
493
494         memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
495         ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256);
496         if (ret < 0)
497                 goto out;
498
499         if (IORING_OP_READ > p->ops_len)
500                 goto out;
501
502         if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
503                 vectored = 0;
504 out:
505         free(p);
506 }
507
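/*
 * io_uring_enter() wrapper: uses the registered ring fd when enabled, and a
 * direct syscall where the architecture supports it to avoid libc overhead.
 */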
508 static int io_uring_enter(struct submitter *s, unsigned int to_submit,
509                           unsigned int min_complete, unsigned int flags)
510 {
511         if (register_ring)
512                 flags |= IORING_ENTER_REGISTERED_RING;
513 #ifdef FIO_ARCH_HAS_SYSCALL
514         return __do_syscall6(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
515                                 min_complete, flags, NULL, 0);
516 #else
517         return syscall(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
518                         min_complete, flags, NULL, 0);
519 #endif
520 }
521
522 static unsigned file_depth(struct submitter *s)
523 {
524         return (depth + s->nr_files - 1) / s->nr_files;
525 }
526
527 static unsigned long long get_offset(struct submitter *s, struct file *f)
528 {
529         unsigned long long offset;
530         long r;
531
532         if (random_io) {
533                 r = __rand64(&s->rand_state);
534                 offset = (r % (f->max_blocks - 1)) * bs;
535         } else {
536                 offset = f->cur_off;
537                 f->cur_off += bs;
538                 if (f->cur_off + bs > f->max_size)
539                         f->cur_off = 0;
540         }
541
542         return offset;
543 }
544
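/*
 * Prepare one read SQE at the next (random or sequential) offset, using fixed
 * files/buffers when registered. The clock batch index is stored in the upper
 * 32 bits of user_data so completions can be matched to submission timestamps.
 */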
545 static void init_io(struct submitter *s, unsigned index)
546 {
547         struct io_uring_sqe *sqe = &s->sqes[index];
548         struct file *f;
549
550         if (do_nop) {
551                 sqe->opcode = IORING_OP_NOP;
552                 return;
553         }
554
555         if (s->nr_files == 1) {
556                 f = &s->files[0];
557         } else {
558                 f = &s->files[s->cur_file];
559                 if (f->pending_ios >= file_depth(s)) {
560                         s->cur_file++;
561                         if (s->cur_file == s->nr_files)
562                                 s->cur_file = 0;
563                         f = &s->files[s->cur_file];
564                 }
565         }
566         f->pending_ios++;
567
568         if (register_files) {
569                 sqe->flags = IOSQE_FIXED_FILE;
570                 sqe->fd = f->fixed_fd;
571         } else {
572                 sqe->flags = 0;
573                 sqe->fd = f->real_fd;
574         }
575         if (fixedbufs) {
576                 sqe->opcode = IORING_OP_READ_FIXED;
577                 sqe->addr = (unsigned long) s->iovecs[index].iov_base;
578                 sqe->len = bs;
579                 sqe->buf_index = index;
580         } else if (!vectored) {
581                 sqe->opcode = IORING_OP_READ;
582                 sqe->addr = (unsigned long) s->iovecs[index].iov_base;
583                 sqe->len = bs;
584                 sqe->buf_index = 0;
585         } else {
586                 sqe->opcode = IORING_OP_READV;
587                 sqe->addr = (unsigned long) &s->iovecs[index];
588                 sqe->len = 1;
589                 sqe->buf_index = 0;
590         }
591         sqe->ioprio = 0;
592         sqe->off = get_offset(s, f);
593         sqe->user_data = (unsigned long) f->fileno;
594         if (stats && stats_running)
595                 sqe->user_data |= ((uint64_t)s->clock_index << 32);
596 }
597
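/*
 * NVMe passthrough variant of init_io(): builds a 128-byte SQE carrying an
 * NVMe read command, converting the byte offset and block size into a
 * starting LBA and a zero-based LBA count.
 */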
598 static void init_io_pt(struct submitter *s, unsigned index)
599 {
600         struct io_uring_sqe *sqe = &s->sqes[index << 1];
601         unsigned long offset;
602         struct file *f;
603         struct nvme_uring_cmd *cmd;
604         unsigned long long slba;
605         unsigned long long nlb;
606         long r;
607
608         if (s->nr_files == 1) {
609                 f = &s->files[0];
610         } else {
611                 f = &s->files[s->cur_file];
612                 if (f->pending_ios >= file_depth(s)) {
613                         s->cur_file++;
614                         if (s->cur_file == s->nr_files)
615                                 s->cur_file = 0;
616                         f = &s->files[s->cur_file];
617                 }
618         }
619         f->pending_ios++;
620
621         if (random_io) {
622                 r = __rand64(&s->rand_state);
623                 offset = (r % (f->max_blocks - 1)) * bs;
624         } else {
625                 offset = f->cur_off;
626                 f->cur_off += bs;
627                 if (f->cur_off + bs > f->max_size)
628                         f->cur_off = 0;
629         }
630
631         if (register_files) {
632                 sqe->fd = f->fixed_fd;
633                 sqe->flags = IOSQE_FIXED_FILE;
634         } else {
635                 sqe->fd = f->real_fd;
636                 sqe->flags = 0;
637         }
638         sqe->opcode = IORING_OP_URING_CMD;
639         sqe->user_data = (unsigned long) f->fileno;
640         if (stats)
641                 sqe->user_data |= ((__u64) s->clock_index << 32ULL);
642         sqe->cmd_op = NVME_URING_CMD_IO;
643         slba = offset >> f->lba_shift;
644         nlb = (bs >> f->lba_shift) - 1;
645         cmd = (struct nvme_uring_cmd *)&sqe->cmd;
646         /* cdw10 and cdw11 hold the starting slba */
647         cmd->cdw10 = slba & 0xffffffff;
648         cmd->cdw11 = slba >> 32;
649         /* cdw12 holds the zero-based number of LBAs to read */
650         cmd->cdw12 = nlb;
651         cmd->addr = (unsigned long) s->iovecs[index].iov_base;
652         cmd->data_len = bs;
653         cmd->nsid = f->nsid;
654         cmd->opcode = 2;
655 }
656
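/*
 * Queue up to max_ios new SQEs, stopping early if the SQ ring fills; the new
 * tail is published with release semantics once the entries are initialized.
 */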
657 static int prep_more_ios_uring(struct submitter *s, int max_ios)
658 {
659         struct io_sq_ring *ring = &s->sq_ring;
660         unsigned index, tail, next_tail, prepped = 0;
661         unsigned int head = atomic_load_acquire(ring->head);
662
663         next_tail = tail = *ring->tail;
664         do {
665                 next_tail++;
666                 if (next_tail == head)
667                         break;
668
669                 index = tail & sq_ring_mask;
670                 if (pt)
671                         init_io_pt(s, index);
672                 else
673                         init_io(s, index);
674                 prepped++;
675                 tail = next_tail;
676         } while (prepped < max_ios);
677
678         if (prepped)
679                 atomic_store_release(ring->tail, tail);
680         return prepped;
681 }
682
683 static int get_file_size(struct file *f)
684 {
685         struct stat st;
686
687         if (fstat(f->real_fd, &st) < 0)
688                 return -1;
689         if (pt) {
690                 __u64 nlba;
691                 __u32 lbs;
692                 int ret;
693
694                 if (!S_ISCHR(st.st_mode)) {
695                         fprintf(stderr, "passthrough only works with nvme-ns "
696                                         "generic char devices (/dev/ngXnY)\n");
697                         return -1;
698                 }
699                 ret = nvme_get_info(f->real_fd, &f->nsid, &lbs, &nlba);
700                 if (ret)
701                         return -1;
702                 if ((bs % lbs) != 0) {
703                         printf("error: bs:%d should be a multiple of logical_block_size:%d\n",
704                                         bs, lbs);
705                         return -1;
706                 }
707                 f->max_blocks = nlba / bs;
708                 f->max_size = nlba;
709                 f->lba_shift = ilog2(lbs);
710                 return 0;
711         } else if (S_ISBLK(st.st_mode)) {
712                 unsigned long long bytes;
713
714                 if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
715                         return -1;
716
717                 f->max_blocks = bytes / bs;
718                 f->max_size = bytes;
719                 return 0;
720         } else if (S_ISREG(st.st_mode)) {
721                 f->max_blocks = st.st_size / bs;
722                 f->max_size = st.st_size;
723                 return 0;
724         }
725
726         return -1;
727 }
728
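/*
 * Reap completions from the CQ ring, checking each result against the block
 * size and batching latency samples per clock index before publishing the
 * new head.
 */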
729 static int reap_events_uring(struct submitter *s)
730 {
731         struct io_cq_ring *ring = &s->cq_ring;
732         struct io_uring_cqe *cqe;
733         unsigned head, reaped = 0;
734         int last_idx = -1, stat_nr = 0;
735
736         head = *ring->head;
737         do {
738                 struct file *f;
739
740                 read_barrier();
741                 if (head == atomic_load_acquire(ring->tail))
742                         break;
743                 cqe = &ring->cqes[head & cq_ring_mask];
744                 if (!do_nop) {
745                         int fileno = cqe->user_data & 0xffffffff;
746
747                         f = &s->files[fileno];
748                         f->pending_ios--;
749                         if (cqe->res != bs) {
750                                 printf("io: unexpected ret=%d\n", cqe->res);
751                                 if (polled && cqe->res == -EOPNOTSUPP)
752                                         printf("Your filesystem/driver/kernel doesn't support polled IO\n");
753                                 return -1;
754                         }
755                 }
756                 if (stats) {
757                         int clock_index = cqe->user_data >> 32;
758
759                         if (last_idx != clock_index) {
760                                 if (last_idx != -1) {
761                                         add_stat(s, last_idx, stat_nr);
762                                         stat_nr = 0;
763                                 }
764                                 last_idx = clock_index;
765                         }
766                         stat_nr++;
767                 }
768                 reaped++;
769                 head++;
770         } while (1);
771
772         if (stat_nr)
773                 add_stat(s, last_idx, stat_nr);
774
775         if (reaped) {
776                 s->inflight -= reaped;
777                 atomic_store_release(ring->head, head);
778         }
779         return reaped;
780 }
781
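/*
 * Passthrough variant of reap_events_uring(): CQEs are 32 bytes (hence the
 * index << 1), and success means res == 0 rather than the block size.
 */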
782 static int reap_events_uring_pt(struct submitter *s)
783 {
784         struct io_cq_ring *ring = &s->cq_ring;
785         struct io_uring_cqe *cqe;
786         unsigned head, reaped = 0;
787         int last_idx = -1, stat_nr = 0;
788         unsigned index;
789         int fileno;
790
791         head = *ring->head;
792         do {
793                 struct file *f;
794
795                 read_barrier();
796                 if (head == atomic_load_acquire(ring->tail))
797                         break;
798                 index = head & cq_ring_mask;
799                 cqe = &ring->cqes[index << 1];
800                 fileno = cqe->user_data & 0xffffffff;
801                 f = &s->files[fileno];
802                 f->pending_ios--;
803
804                 if (cqe->res != 0) {
805                         printf("io: unexpected ret=%d\n", cqe->res);
806                         if (polled && cqe->res == -EINVAL)
807                                 printf("passthrough doesn't support polled IO\n");
808                         return -1;
809                 }
810                 if (stats) {
811                         int clock_index = cqe->user_data >> 32;
812
813                         if (last_idx != clock_index) {
814                                 if (last_idx != -1) {
815                                         add_stat(s, last_idx, stat_nr);
816                                         stat_nr = 0;
817                                 }
818                                 last_idx = clock_index;
819                         }
820                         stat_nr++;
821                 }
822                 reaped++;
823                 head++;
824         } while (1);
825
826         if (stat_nr)
827                 add_stat(s, last_idx, stat_nr);
828
829         if (reaped) {
830                 s->inflight -= reaped;
831                 atomic_store_release(ring->head, head);
832         }
833         return reaped;
834 }
835
836 static void set_affinity(struct submitter *s)
837 {
838 #ifdef CONFIG_LIBNUMA
839         struct bitmask *mask;
840
841         if (s->numa_node == -1)
842                 return;
843
844         numa_set_preferred(s->numa_node);
845
846         mask = numa_allocate_cpumask();
847         numa_node_to_cpus(s->numa_node, mask);
848         numa_sched_setaffinity(s->tid, mask);
849 #endif
850 }
851
852 static int detect_node(struct submitter *s, const char *name)
853 {
854 #ifdef CONFIG_LIBNUMA
855         const char *base = basename(name);
856         char str[128];
857         int ret, fd, node;
858
859         sprintf(str, "/sys/block/%s/device/numa_node", base);
860         fd = open(str, O_RDONLY);
861         if (fd < 0)
862                 return -1;
863
864         ret = read(fd, str, sizeof(str));
865         if (ret < 0) {
866                 close(fd);
867                 return -1;
868         }
869         node = atoi(str);
870         s->numa_node = node;
871         close(fd);
872 #else
873         s->numa_node = -1;
874 #endif
875         return 0;
876 }
877
878 static int setup_aio(struct submitter *s)
879 {
880 #ifdef CONFIG_LIBAIO
881         if (polled) {
882                 fprintf(stderr, "aio does not support polled IO\n");
883                 polled = 0;
884         }
885         if (sq_thread_poll) {
886                 fprintf(stderr, "aio does not support SQPOLL IO\n");
887                 sq_thread_poll = 0;
888         }
889         if (do_nop) {
890                 fprintf(stderr, "aio does not support no-op IO\n");
891                 do_nop = 0;
892         }
893         if (fixedbufs || register_files) {
894                 fprintf(stderr, "aio does not support registered files or buffers\n");
895                 fixedbufs = register_files = 0;
896         }
897
898         return io_queue_init(roundup_pow2(depth), &s->aio_ctx);
899 #else
900         fprintf(stderr, "Legacy AIO not available on this system/build\n");
901         errno = EINVAL;
902         return -1;
903 #endif
904 }
905
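/*
 * Create the ring, register buffers/files as requested, and mmap the SQ, SQE
 * and CQ regions, sizing them for the larger SQE128/CQE32 layouts used by
 * passthrough.
 */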
906 static int setup_ring(struct submitter *s)
907 {
908         struct io_sq_ring *sring = &s->sq_ring;
909         struct io_cq_ring *cring = &s->cq_ring;
910         struct io_uring_params p;
911         int ret, fd, i;
912         void *ptr;
913         size_t len;
914
915         memset(&p, 0, sizeof(p));
916
917         if (polled && !do_nop)
918                 p.flags |= IORING_SETUP_IOPOLL;
919         if (sq_thread_poll) {
920                 p.flags |= IORING_SETUP_SQPOLL;
921                 if (sq_thread_cpu != -1) {
922                         p.flags |= IORING_SETUP_SQ_AFF;
923                         p.sq_thread_cpu = sq_thread_cpu;
924                 }
925         }
926         if (pt) {
927                 p.flags |= IORING_SETUP_SQE128;
928                 p.flags |= IORING_SETUP_CQE32;
929         }
930
931         fd = io_uring_setup(depth, &p);
932         if (fd < 0) {
933                 perror("io_uring_setup");
934                 return 1;
935         }
936         s->ring_fd = s->enter_ring_fd = fd;
937
938         io_uring_probe(fd);
939
940         if (fixedbufs) {
941                 struct rlimit rlim;
942
943                 rlim.rlim_cur = RLIM_INFINITY;
944                 rlim.rlim_max = RLIM_INFINITY;
945                 /* ignore potential error, not needed on newer kernels */
946                 setrlimit(RLIMIT_MEMLOCK, &rlim);
947
948                 ret = io_uring_register_buffers(s);
949                 if (ret < 0) {
950                         perror("io_uring_register_buffers");
951                         return 1;
952                 }
953
954                 if (dma_map) {
955                         ret = io_uring_map_buffers(s);
956                         if (ret < 0) {
957                                 perror("io_uring_map_buffers");
958                                 return 1;
959                         }
960                 }
961         }
962
963         if (register_files) {
964                 ret = io_uring_register_files(s);
965                 if (ret < 0) {
966                         perror("io_uring_register_files");
967                         return 1;
968                 }
969         }
970
971         ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
972                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
973                         IORING_OFF_SQ_RING);
974         sring->head = ptr + p.sq_off.head;
975         sring->tail = ptr + p.sq_off.tail;
976         sring->ring_mask = ptr + p.sq_off.ring_mask;
977         sring->ring_entries = ptr + p.sq_off.ring_entries;
978         sring->flags = ptr + p.sq_off.flags;
979         sring->array = ptr + p.sq_off.array;
980         sq_ring_mask = *sring->ring_mask;
981
982         if (p.flags & IORING_SETUP_SQE128)
983                 len = 2 * p.sq_entries * sizeof(struct io_uring_sqe);
984         else
985                 len = p.sq_entries * sizeof(struct io_uring_sqe);
986         s->sqes = mmap(0, len,
987                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
988                         IORING_OFF_SQES);
989
990         if (p.flags & IORING_SETUP_CQE32) {
991                 len = p.cq_off.cqes +
992                         2 * p.cq_entries * sizeof(struct io_uring_cqe);
993         } else {
994                 len = p.cq_off.cqes +
995                         p.cq_entries * sizeof(struct io_uring_cqe);
996         }
997         ptr = mmap(0, len,
998                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
999                         IORING_OFF_CQ_RING);
1000         cring->head = ptr + p.cq_off.head;
1001         cring->tail = ptr + p.cq_off.tail;
1002         cring->ring_mask = ptr + p.cq_off.ring_mask;
1003         cring->ring_entries = ptr + p.cq_off.ring_entries;
1004         cring->cqes = ptr + p.cq_off.cqes;
1005         cq_ring_mask = *cring->ring_mask;
1006
1007         for (i = 0; i < p.sq_entries; i++)
1008                 sring->array[i] = i;
1009
1010         return 0;
1011 }
1012
1013 static void *allocate_mem(struct submitter *s, int size)
1014 {
1015         void *buf;
1016
1017 #ifdef CONFIG_LIBNUMA
1018         if (s->numa_node != -1)
1019                 return numa_alloc_onnode(size, s->numa_node);
1020 #endif
1021
1022         if (posix_memalign(&buf, t_io_uring_page_size, size)) {
1023                 printf("failed alloc\n");
1024                 return NULL;
1025         }
1026
1027         return buf;
1028 }
1029
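/*
 * Per-thread setup: NUMA placement, IO buffer allocation, ring (or aio/sync)
 * setup, and latency-stat bookkeeping. Returns the clock batch size used for
 * stats, or 0 when stats are disabled.
 */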
1030 static int submitter_init(struct submitter *s)
1031 {
1032         int i, nr_batch, err;
1033         static int init_printed;
1034         char buf[80];
1035         s->tid = gettid();
1036         printf("submitter=%d, tid=%d, file=%s, node=%d\n", s->index, s->tid,
1037                                                         s->filename, s->numa_node);
1038
1039         set_affinity(s);
1040
1041         __init_rand64(&s->rand_state, s->tid);
1042         srand48(s->tid);
1043
1044         for (i = 0; i < MAX_FDS; i++)
1045                 s->files[i].fileno = i;
1046
1047         for (i = 0; i < roundup_pow2(depth); i++) {
1048                 void *buf;
1049
1050                 buf = allocate_mem(s, bs);
1051                 if (!buf)
1052                         return 1;
1053                 s->iovecs[i].iov_base = buf;
1054                 s->iovecs[i].iov_len = bs;
1055         }
1056
1057         if (use_sync) {
1058                 sprintf(buf, "Engine=preadv2\n");
1059                 err = 0;
1060         } else if (!aio) {
1061                 err = setup_ring(s);
1062                 sprintf(buf, "Engine=io_uring, sq_ring=%d, cq_ring=%d\n", *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
1063         } else {
1064                 sprintf(buf, "Engine=aio\n");
1065                 err = setup_aio(s);
1066         }
1067         if (err) {
1068                 printf("queue setup failed: %s, %d\n", strerror(errno), err);
1069                 return 1;
1070         }
1071
1072         if (!init_printed) {
1073                 printf("polled=%d, fixedbufs=%d/%d, register_files=%d, buffered=%d, QD=%d\n", polled, fixedbufs, dma_map, register_files, buffered, depth);
1074                 printf("%s", buf);
1075                 init_printed = 1;
1076         }
1077
1078         if (stats) {
1079                 nr_batch = roundup_pow2(depth / batch_submit);
1080                 if (nr_batch < 2)
1081                         nr_batch = 2;
1082                 s->clock_batch = calloc(nr_batch, sizeof(unsigned long));
1083                 s->clock_index = 1;
1084
1085                 s->plat = calloc(PLAT_NR, sizeof(unsigned long));
1086         } else {
1087                 s->clock_batch = NULL;
1088                 s->plat = NULL;
1089                 nr_batch = 0;
1090         }
1091         /* Perform the expensive passthrough command initialization here,
1092          * rather than in the fast path.
1093          */
1094         if (pt) {
1095                 for (i = 0; i < roundup_pow2(depth); i++) {
1096                         struct io_uring_sqe *sqe = &s->sqes[i << 1];
1097
1098                         memset(&sqe->cmd, 0, sizeof(struct nvme_uring_cmd));
1099                 }
1100         }
1101         return nr_batch;
1102 }
1103
1104 #ifdef CONFIG_LIBAIO
1105 static int prep_more_ios_aio(struct submitter *s, int max_ios, struct iocb *iocbs)
1106 {
1107         uint64_t data;
1108         struct file *f;
1109         unsigned index;
1110
1111         index = 0;
1112         while (index < max_ios) {
1113                 struct iocb *iocb = &iocbs[index];
1114
1115                 if (s->nr_files == 1) {
1116                         f = &s->files[0];
1117                 } else {
1118                         f = &s->files[s->cur_file];
1119                         if (f->pending_ios >= file_depth(s)) {
1120                                 s->cur_file++;
1121                                 if (s->cur_file == s->nr_files)
1122                                         s->cur_file = 0;
1123                                 f = &s->files[s->cur_file];
1124                         }
1125                 }
1126                 f->pending_ios++;
1127
1128                 io_prep_pread(iocb, f->real_fd, s->iovecs[index].iov_base,
1129                                 s->iovecs[index].iov_len, get_offset(s, f));
1130
1131                 data = f->fileno;
1132                 if (stats && stats_running)
1133                         data |= (((uint64_t) s->clock_index) << 32);
1134                 iocb->data = (void *) (uintptr_t) data;
1135                 index++;
1136         }
1137         return index;
1138 }
1139
1140 static int reap_events_aio(struct submitter *s, struct io_event *events, int evs)
1141 {
1142         int last_idx = -1, stat_nr = 0;
1143         int reaped = 0;
1144
1145         while (evs) {
1146                 uint64_t data = (uintptr_t) events[reaped].data;
1147                 struct file *f = &s->files[data & 0xffffffff];
1148
1149                 f->pending_ios--;
1150                 if (events[reaped].res != bs) {
1151                         printf("io: unexpected ret=%ld\n", events[reaped].res);
1152                         return -1;
1153                 }
1154                 if (stats) {
1155                         int clock_index = data >> 32;
1156
1157                         if (last_idx != clock_index) {
1158                                 if (last_idx != -1) {
1159                                         add_stat(s, last_idx, stat_nr);
1160                                         stat_nr = 0;
1161                                 }
1162                                 last_idx = clock_index;
1163                         }
1164                         stat_nr++;
1165                 }
1166                 reaped++;
1167                 evs--;
1168         }
1169
1170         if (stat_nr)
1171                 add_stat(s, last_idx, stat_nr);
1172
1173         s->inflight -= reaped;
1174         s->done += reaped;
1175         return reaped;
1176 }
1177
1178 static void *submitter_aio_fn(void *data)
1179 {
1180         struct submitter *s = data;
1181         int i, ret, prepped;
1182         struct iocb **iocbsptr;
1183         struct iocb *iocbs;
1184         struct io_event *events;
1185 #ifdef ARCH_HAVE_CPU_CLOCK
1186         int nr_batch = submitter_init(s);
1187 #else
1188         submitter_init(s);
1189 #endif
1190
1191         iocbsptr = calloc(depth, sizeof(struct iocb *));
1192         iocbs = calloc(depth, sizeof(struct iocb));
1193         events = calloc(depth, sizeof(struct io_event));
1194
1195         for (i = 0; i < depth; i++)
1196                 iocbsptr[i] = &iocbs[i];
1197
1198         prepped = 0;
1199         do {
1200                 int to_wait, to_submit, to_prep;
1201
1202                 if (!prepped && s->inflight < depth) {
1203                         to_prep = min(depth - s->inflight, batch_submit);
1204                         prepped = prep_more_ios_aio(s, to_prep, iocbs);
1205 #ifdef ARCH_HAVE_CPU_CLOCK
1206                         if (prepped && stats) {
1207                                 s->clock_batch[s->clock_index] = get_cpu_clock();
1208                                 s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
1209                         }
1210 #endif
1211                 }
1212                 s->inflight += prepped;
1213                 to_submit = prepped;
1214
1215                 if (to_submit && (s->inflight + to_submit <= depth))
1216                         to_wait = 0;
1217                 else
1218                         to_wait = min(s->inflight + to_submit, batch_complete);
1219
1220                 ret = io_submit(s->aio_ctx, to_submit, iocbsptr);
1221                 s->calls++;
1222                 if (ret < 0) {
1223                         perror("io_submit");
1224                         break;
1225                 } else if (ret != to_submit) {
1226                         printf("submitted %d, wanted %d\n", ret, to_submit);
1227                         break;
1228                 }
1229                 prepped = 0;
1230
1231                 while (to_wait) {
1232                         int r;
1233
1234                         s->calls++;
1235                         r = io_getevents(s->aio_ctx, to_wait, to_wait, events, NULL);
1236                         if (r < 0) {
1237                                 perror("io_getevents");
1238                                 break;
1239                         } else if (r != to_wait) {
1240                                 printf("r=%d, wait=%d\n", r, to_wait);
1241                                 break;
1242                         }
1243                         r = reap_events_aio(s, events, r);
1244                         s->reaps += r;
1245                         to_wait -= r;
1246                 }
1247         } while (!s->finish);
1248
1249         free(iocbsptr);
1250         free(iocbs);
1251         free(events);
1252         finish = 1;
1253         return NULL;
1254 }
1255 #endif
1256
1257 static void io_uring_unregister_ring(struct submitter *s)
1258 {
1259         struct io_uring_rsrc_update up = {
1260                 .offset = s->enter_ring_fd,
1261         };
1262
1263         syscall(__NR_io_uring_register, s->ring_fd, IORING_UNREGISTER_RING_FDS,
1264                 &up, 1);
1265 }
1266
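/*
 * Register the ring fd so io_uring_enter() can be called with a fixed index;
 * on failure, fall back to using the real ring fd.
 */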
1267 static int io_uring_register_ring(struct submitter *s)
1268 {
1269         struct io_uring_rsrc_update up = {
1270                 .data   = s->ring_fd,
1271                 .offset = -1U,
1272         };
1273         int ret;
1274
1275         ret = syscall(__NR_io_uring_register, s->ring_fd,
1276                         IORING_REGISTER_RING_FDS, &up, 1);
1277         if (ret == 1) {
1278                 s->enter_ring_fd = up.offset;
1279                 return 0;
1280         }
1281         register_ring = 0;
1282         return -1;
1283 }
1284
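/*
 * Main io_uring submission loop: prepare a batch of SQEs, submit them (waking
 * the SQPOLL thread if needed), then reap completions until the wait target
 * has been met.
 */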
1285 static void *submitter_uring_fn(void *data)
1286 {
1287         struct submitter *s = data;
1288         struct io_sq_ring *ring = &s->sq_ring;
1289         int ret, prepped;
1290 #ifdef ARCH_HAVE_CPU_CLOCK
1291         int nr_batch = submitter_init(s);
1292 #else
1293         submitter_init(s);
1294 #endif
1295
1296         if (register_ring)
1297                 io_uring_register_ring(s);
1298
1299         prepped = 0;
1300         do {
1301                 int to_wait, to_submit, this_reap, to_prep;
1302                 unsigned ring_flags = 0;
1303
1304                 if (!prepped && s->inflight < depth) {
1305                         to_prep = min(depth - s->inflight, batch_submit);
1306                         prepped = prep_more_ios_uring(s, to_prep);
1307 #ifdef ARCH_HAVE_CPU_CLOCK
1308                         if (prepped && stats) {
1309                                 s->clock_batch[s->clock_index] = get_cpu_clock();
1310                                 s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
1311                         }
1312 #endif
1313                 }
1314                 s->inflight += prepped;
1315 submit_more:
1316                 to_submit = prepped;
1317 submit:
1318                 if (to_submit && (s->inflight + to_submit <= depth))
1319                         to_wait = 0;
1320                 else
1321                         to_wait = min(s->inflight + to_submit, batch_complete);
1322
1323                 /*
1324                  * Only need to call io_uring_enter if we're not using SQ thread
1325                  * poll, or if IORING_SQ_NEED_WAKEUP is set.
1326                  */
1327                 if (sq_thread_poll)
1328                         ring_flags = atomic_load_acquire(ring->flags);
1329                 if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
1330                         unsigned flags = 0;
1331
1332                         if (to_wait)
1333                                 flags = IORING_ENTER_GETEVENTS;
1334                         if (ring_flags & IORING_SQ_NEED_WAKEUP)
1335                                 flags |= IORING_ENTER_SQ_WAKEUP;
1336                         ret = io_uring_enter(s, to_submit, to_wait, flags);
1337                         s->calls++;
1338                 } else {
1339                         /* for SQPOLL, we submitted it all effectively */
1340                         ret = to_submit;
1341                 }
1342
1343                 /*
1344                  * For non SQ thread poll, we already got the events we needed
1345                  * through the io_uring_enter() above. For SQ thread poll, we
1346                  * need to loop here until we find enough events.
1347                  */
1348                 this_reap = 0;
1349                 do {
1350                         int r;
1351
1352                         if (pt)
1353                                 r = reap_events_uring_pt(s);
1354                         else
1355                                 r = reap_events_uring(s);
1356                         if (r == -1) {
1357                                 s->finish = 1;
1358                                 break;
1359                         } else if (r > 0)
1360                                 this_reap += r;
1361                 } while (sq_thread_poll && this_reap < to_wait);
1362                 s->reaps += this_reap;
1363
1364                 if (ret >= 0) {
1365                         if (!ret) {
1366                                 to_submit = 0;
1367                                 if (s->inflight)
1368                                         goto submit;
1369                                 continue;
1370                         } else if (ret < to_submit) {
1371                                 int diff = to_submit - ret;
1372
1373                                 s->done += ret;
1374                                 prepped -= diff;
1375                                 goto submit_more;
1376                         }
1377                         s->done += ret;
1378                         prepped = 0;
1379                         continue;
1380                 } else if (ret < 0) {
1381                         if (errno == EAGAIN) {
1382                                 if (s->finish)
1383                                         break;
1384                                 if (this_reap)
1385                                         goto submit;
1386                                 to_submit = 0;
1387                                 goto submit;
1388                         }
1389                         printf("io_uring_enter: %s\n", strerror(errno));
1390                         break;
1391                 }
1392         } while (!s->finish);
1393
1394         if (register_ring)
1395                 io_uring_unregister_ring(s);
1396
1397         finish = 1;
1398         return NULL;
1399 }
1400
1401 #ifdef CONFIG_PWRITEV2
1402 static void *submitter_sync_fn(void *data)
1403 {
1404         struct submitter *s = data;
1405         int ret;
1406
1407         submitter_init(s);
1408
1409         do {
1410                 uint64_t offset;
1411                 struct file *f;
1412
1413                 if (s->nr_files == 1) {
1414                         f = &s->files[0];
1415                 } else {
1416                         f = &s->files[s->cur_file];
1417                         if (f->pending_ios >= file_depth(s)) {
1418                                 s->cur_file++;
1419                                 if (s->cur_file == s->nr_files)
1420                                         s->cur_file = 0;
1421                                 f = &s->files[s->cur_file];
1422                         }
1423                 }
1424                 f->pending_ios++;
1425
1426 #ifdef ARCH_HAVE_CPU_CLOCK
1427                 if (stats)
1428                         s->clock_batch[s->clock_index] = get_cpu_clock();
1429 #endif
1430
1431                 s->inflight++;
1432                 s->calls++;
1433
1434                 offset = get_offset(s, f);
1435                 if (polled)
1436                         ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, RWF_HIPRI);
1437                 else
1438                         ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, 0);
1439
1440                 if (ret < 0) {
1441                         perror("preadv2");
1442                         break;
1443                 } else if (ret != bs) {
1444                         break;
1445                 }
1446
1447                 s->done++;
1448                 s->inflight--;
1449                 f->pending_ios--;
1450                 if (stats)
1451                         add_stat(s, s->clock_index, 1);
1452         } while (!s->finish);
1453
1454         finish = 1;
1455         return NULL;
1456 }
1457 #else
1458 static void *submitter_sync_fn(void *data)
1459 {
1460         finish = 1;
1461         return NULL;
1462 }
1463 #endif
1464
1465 static struct submitter *get_submitter(int offset)
1466 {
1467         void *ret;
1468
1469         ret = submitter;
1470         if (offset)
1471                 ret += offset * (sizeof(*submitter) + depth * sizeof(struct iovec));
1472         return ret;
1473 }
1474
1475 static void do_finish(const char *reason)
1476 {
1477         int j;
1478
1479         printf("Exiting on %s\n", reason);
1480         for (j = 0; j < nthreads; j++) {
1481                 struct submitter *s = get_submitter(j);
1482                 s->finish = 1;
1483         }
1484         if (max_iops > 1000000) {
1485                 double miops = (double) max_iops / 1000000.0;
1486                 printf("Maximum IOPS=%.2fM\n", miops);
1487         } else if (max_iops > 100000) {
1488                 double kiops = (double) max_iops / 1000.0;
1489                 printf("Maximum IOPS=%.2fK\n", kiops);
1490         } else {
1491                 printf("Maximum IOPS=%lu\n", max_iops);
1492         }
1493         finish = 1;
1494 }
1495
1496 static void sig_int(int sig)
1497 {
1498         do_finish("signal");
1499 }
1500
1501 static void arm_sig_int(void)
1502 {
1503         struct sigaction act;
1504
1505         memset(&act, 0, sizeof(act));
1506         act.sa_handler = sig_int;
1507         act.sa_flags = SA_RESTART;
1508         sigaction(SIGINT, &act, NULL);
1509
1510         /* Windows uses SIGBREAK as a quit signal from other applications */
1511 #ifdef WIN32
1512         sigaction(SIGBREAK, &act, NULL);
1513 #endif
1514 }
1515
1516 static void usage(char *argv, int status)
1517 {
1518         char runtime_str[16];
1519         snprintf(runtime_str, sizeof(runtime_str), "%d", runtime);
1520         printf("%s [options] -- [filenames]\n"
1521                 " -d <int>  : IO Depth, default %d\n"
1522                 " -s <int>  : Batch submit, default %d\n"
1523                 " -c <int>  : Batch complete, default %d\n"
1524                 " -b <int>  : Block size, default %d\n"
1525                 " -p <bool> : Polled IO, default %d\n"
1526                 " -B <bool> : Fixed buffers, default %d\n"
1527                 " -D <bool> : DMA map fixed buffers, default %d\n"
1528                 " -F <bool> : Register files, default %d\n"
1529                 " -n <int>  : Number of threads, default %d\n"
1530                 " -O <bool> : Use O_DIRECT, default %d\n"
1531                 " -N <bool> : Perform just no-op requests, default %d\n"
1532                 " -t <bool> : Track IO latencies, default %d\n"
1533                 " -T <int>  : TSC rate in Hz\n"
1534                 " -r <int>  : Runtime in seconds, default %s\n"
1535                 " -R <bool> : Use random IO, default %d\n"
1536                 " -a <bool> : Use legacy aio, default %d\n"
1537                 " -S <bool> : Use sync IO (preadv2), default %d\n"
1538                 " -X <bool> : Use registered ring, default %d\n"
1539                 " -P <bool> : Automatically place on device home node, default %d\n"
1540                 " -u <bool> : Use nvme-passthrough I/O, default %d\n",
1541                 argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
1542                 fixedbufs, dma_map, register_files, nthreads, !buffered, do_nop,
1543                 stats, runtime == 0 ? "unlimited" : runtime_str, random_io, aio,
1544                 use_sync, register_ring, numa_placement, pt);
1545         exit(status);
1546 }
1547
1548 static void read_tsc_rate(void)
1549 {
1550         char buffer[32];
1551         int fd, ret;
1552
1553         if (tsc_rate)
1554                 return;
1555
1556         fd = open(TSC_RATE_FILE, O_RDONLY);
1557         if (fd < 0)
1558                 return;
1559
1560         ret = read(fd, buffer, sizeof(buffer));
1561         if (ret < 0) {
1562                 close(fd);
1563                 return;
1564         }
1565
1566         tsc_rate = strtoul(buffer, NULL, 10);
1567         printf("Using TSC rate %luHz\n", tsc_rate);
1568         close(fd);
1569 }
1570
1571 static void write_tsc_rate(void)
1572 {
1573         char buffer[32];
1574         struct stat sb;
1575         int fd, ret;
1576
1577         if (!stat(TSC_RATE_FILE, &sb))
1578                 return;
1579
1580         fd = open(TSC_RATE_FILE, O_WRONLY | O_CREAT, 0644);
1581         if (fd < 0)
1582                 return;
1583
1584         memset(buffer, 0, sizeof(buffer));
1585         sprintf(buffer, "%lu", tsc_rate);
1586         ret = write(fd, buffer, strlen(buffer));
1587         if (ret < 0)
1588                 perror("write");
1589         close(fd);
1590 }
1591
1592 int main(int argc, char *argv[])
1593 {
1594         struct submitter *s;
1595         unsigned long done, calls, reap;
1596         int i, j, flags, fd, opt, threads_per_f, threads_rem = 0, nfiles;
1597         struct file f;
1598         void *ret;
1599
1600         if (!do_nop && argc < 2)
1601                 usage(argv[0], 1);
1602
1603         while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:t:T:a:r:D:R:X:S:P:u:h?")) != -1) {
1604                 switch (opt) {
1605                 case 'a':
1606                         aio = !!atoi(optarg);
1607                         break;
1608                 case 'd':
1609                         depth = atoi(optarg);
1610                         break;
1611                 case 's':
1612                         batch_submit = atoi(optarg);
1613                         if (!batch_submit)
1614                                 batch_submit = 1;
1615                         break;
1616                 case 'c':
1617                         batch_complete = atoi(optarg);
1618                         if (!batch_complete)
1619                                 batch_complete = 1;
1620                         break;
1621                 case 'b':
1622                         bs = atoi(optarg);
1623                         break;
1624                 case 'p':
1625                         polled = !!atoi(optarg);
1626                         break;
1627                 case 'B':
1628                         fixedbufs = !!atoi(optarg);
1629                         break;
1630                 case 'F':
1631                         register_files = !!atoi(optarg);
1632                         break;
1633                 case 'n':
1634                         nthreads = atoi(optarg);
1635                         if (!nthreads) {
1636                                 printf("Threads must be non-zero\n");
1637                                 usage(argv[0], 1);
1638                         }
1639                         break;
1640                 case 'N':
1641                         do_nop = !!atoi(optarg);
1642                         break;
1643                 case 'O':
1644                         buffered = !atoi(optarg);
1645                         break;
1646                 case 't':
1647 #ifndef ARCH_HAVE_CPU_CLOCK
1648                         fprintf(stderr, "Stats not supported on this CPU\n");
1649                         return 1;
1650 #endif
1651                         stats = !!atoi(optarg);
1652                         break;
1653                 case 'T':
1654 #ifndef ARCH_HAVE_CPU_CLOCK
1655                         fprintf(stderr, "Stats not supported on this CPU\n");
1656                         return 1;
1657 #endif
1658                         tsc_rate = strtoul(optarg, NULL, 10);
1659                         write_tsc_rate();
1660                         break;
1661                 case 'r':
1662                         runtime = atoi(optarg);
1663                         break;
1664                 case 'D':
1665                         dma_map = !!atoi(optarg);
1666                         break;
1667                 case 'R':
1668                         random_io = !!atoi(optarg);
1669                         break;
1670                 case 'X':
1671                         register_ring = !!atoi(optarg);
1672                         break;
1673                 case 'S':
1674 #ifdef CONFIG_PWRITEV2
1675                         use_sync = !!atoi(optarg);
1676 #else
1677                         fprintf(stderr, "preadv2 not supported\n");
1678                         exit(1);
1679 #endif
1680                         break;
1681                 case 'P':
1682                         numa_placement = !!atoi(optarg);
1683                         break;
1684                 case 'u':
1685                         pt = !!atoi(optarg);
1686                         break;
1687                 case 'h':
1688                 case '?':
1689                 default:
1690                         usage(argv[0], 0);
1691                         break;
1692                 }
1693         }
1694
1695         if (stats)
1696                 read_tsc_rate();
1697
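        /*
         * Clamp the batch sizes to the queue depth, and drop DMA
         * premapping if fixed buffers aren't being used.
         */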
1698         if (batch_complete > depth)
1699                 batch_complete = depth;
1700         if (batch_submit > depth)
1701                 batch_submit = depth;
1702         if (!fixedbufs && dma_map)
1703                 dma_map = 0;
1704
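        /*
         * Allocate one submitter per thread, each with a trailing iovec
         * array sized to the power-of-2 rounded queue depth, and reset
         * the per-thread counters.
         */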
1705         submitter = calloc(nthreads, sizeof(*submitter) +
1706                                 roundup_pow2(depth) * sizeof(struct iovec));
1707         for (j = 0; j < nthreads; j++) {
1708                 s = get_submitter(j);
1709                 s->numa_node = -1;
1710                 s->index = j;
1711                 s->done = s->calls = s->reaps = 0;
1712         }
1713
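        /*
         * Files are opened read-only with O_NOATIME; O_DIRECT is added
         * unless buffered IO (-O 0) was requested.
         */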
1714         flags = O_RDONLY | O_NOATIME;
1715         if (!buffered)
1716                 flags |= O_DIRECT;
1717
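        /*
         * Distribute files over the submitter threads: each file gets
         * nthreads / nfiles threads, with the remainder giving one extra
         * thread to the first files. If there are more files than
         * threads, files wrap around the threads instead.
         */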
1718         j = 0;
1719         i = optind;
1720         nfiles = argc - i;
1721         if (!do_nop) {
1722                 if (!nfiles) {
1723                         printf("No files specified\n");
1724                         usage(argv[0], 1);
1725                 }
1726                 threads_per_f = nthreads / nfiles;
1727                 /* make sure each thread gets assigned files */
1728                 if (threads_per_f == 0) {
1729                         threads_per_f = 1;
1730                 } else {
1731                         threads_rem = nthreads - threads_per_f * nfiles;
1732                 }
1733         }
1734         while (!do_nop && i < argc) {
1735                 int k, limit;
1736
1737                 memset(&f, 0, sizeof(f));
1738
1739                 fd = open(argv[i], flags);
1740                 if (fd < 0) {
1741                         perror("open");
1742                         return 1;
1743                 }
1744                 f.real_fd = fd;
1745                 if (get_file_size(&f)) {
1746                         printf("failed getting size of device/file\n");
1747                         return 1;
1748                 }
1749                 if (f.max_blocks <= 1) {
1750                         printf("Zero or too-small file/device size\n");
1751                         return 1;
1752                 }
1753                 f.max_blocks--;
1754
1755                 limit = threads_per_f;
1756                 limit += threads_rem > 0 ? 1 : 0;
1757                 for (k = 0; k < limit; k++) {
1758                         s = get_submitter((j + k) % nthreads);
1759
1760                         if (s->nr_files == MAX_FDS) {
1761                                 printf("Max number of files (%d) reached\n", MAX_FDS);
1762                                 break;
1763                         }
1764
1765                         memcpy(&s->files[s->nr_files], &f, sizeof(f));
1766
1767                         if (numa_placement)
1768                                 detect_node(s, argv[i]);
1769
1770                         s->filename = argv[i];
1771                         s->nr_files++;
1772                 }
1773                 threads_rem--;
1774                 i++;
1775                 j += limit;
1776         }
1777
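        /*
         * Arm the SIGINT handler and query the system page size, falling
         * back to 4096 if sysconf() fails.
         */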
1778         arm_sig_int();
1779
1780         t_io_uring_page_size = sysconf(_SC_PAGESIZE);
1781         if (t_io_uring_page_size < 0)
1782                 t_io_uring_page_size = 4096;
1783
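        /*
         * Start one worker per thread, using the sync, io_uring, or
         * libaio submission path depending on the options given.
         */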
1784         for (j = 0; j < nthreads; j++) {
1785                 s = get_submitter(j);
1786                 if (use_sync)
1787                         pthread_create(&s->thread, NULL, submitter_sync_fn, s);
1788                 else if (!aio)
1789                         pthread_create(&s->thread, NULL, submitter_uring_fn, s);
1790 #ifdef CONFIG_LIBAIO
1791                 else
1792                         pthread_create(&s->thread, NULL, submitter_aio_fn, s);
1793 #endif
1794         }
1795
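        /*
         * Once a second, sum the per-thread completion, reap, and call
         * counters and print IOPS, bandwidth, and IOs per submit call,
         * until the runtime expires or the run is interrupted.
         */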
1796         reap = calls = done = 0;
1797         do {
1798                 unsigned long this_done = 0;
1799                 unsigned long this_reap = 0;
1800                 unsigned long this_call = 0;
1801                 unsigned long rpc = 0, ipc = 0;
1802                 unsigned long iops, bw;
1803
1804                 sleep(1);
1805                 if (runtime && !--runtime)
1806                         do_finish("timeout");
1807
1808                 /* don't print stats for a partial interval if interrupted by a signal */
1809                 if (finish)
1810                         break;
1811
1812                 /* one second into the run, enable stats */
1813                 if (stats)
1814                         stats_running = 1;
1815
1816                 for (j = 0; j < nthreads; j++) {
1817                         s = get_submitter(j);
1818                         this_done += s->done;
1819                         this_call += s->calls;
1820                         this_reap += s->reaps;
1821                 }
1822                 if (this_call - calls) {
1823                         rpc = (this_done - done) / (this_call - calls);
1824                         ipc = (this_reap - reap) / (this_call - calls);
1825                 } else
1826                         rpc = ipc = -1;
1827                 iops = this_done - done;
1828                 if (bs > 1048576)
1829                         bw = iops * (bs / 1048576);
1830                 else
1831                         bw = iops / (1048576 / bs);
1832                 if (iops > 1000000) {
1833                         double miops = (double) iops / 1000000.0;
1834                         printf("IOPS=%.2fM, ", miops);
1835                 } else if (iops > 100000) {
1836                         double kiops = (double) iops / 1000.0;
1837                         printf("IOPS=%.2fK, ", kiops);
1838                 } else {
1839                         printf("IOPS=%lu, ", iops);
1840                 }
1841                 max_iops = max(max_iops, iops);
1842                 if (!do_nop) {
1843                         if (bw > 2000) {
1844                                 double bw_g = (double) bw / 1024.0;
1845
1846                                 printf("BW=%.2fGiB/s, ", bw_g);
1847                         } else {
1848                                 printf("BW=%luMiB/s, ", bw);
1849                         }
1850                 }
1851                 printf("IOS/call=%ld/%ld\n", rpc, ipc);
1852                 done = this_done;
1853                 calls = this_call;
1854                 reap = this_reap;
1855         } while (!finish);
1856
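        /*
         * Wait for the workers to exit, then print per-thread latency
         * percentiles if stats tracking (-t) was enabled.
         */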
1857         for (j = 0; j < nthreads; j++) {
1858                 s = get_submitter(j);
1859                 pthread_join(s->thread, &ret);
1860                 close(s->ring_fd);
1861
1862                 if (stats) {
1863                         unsigned long nr;
1864
1865                         printf("%d: Latency percentiles:\n", s->tid);
1866                         for (i = 0, nr = 0; i < PLAT_NR; i++)
1867                                 nr += s->plat[i];
1868                         show_clat_percentiles(s->plat, nr, 4);
1869                         free(s->clock_batch);
1870                         free(s->plat);
1871                 }
1872         }
1873
1874         free(submitter);
1875         return 0;
1876 }