Commit | Line | Data |
---|---|---|
c9fb4c5b JA |
1 | #include <stdio.h> |
2 | #include <errno.h> | |
3 | #include <assert.h> | |
4 | #include <stdlib.h> | |
5 | #include <stddef.h> | |
6 | #include <signal.h> | |
7 | #include <inttypes.h> | |
932131c9 | 8 | #include <math.h> |
c9fb4c5b | 9 | |
256714ea JA |
10 | #ifdef CONFIG_LIBAIO |
11 | #include <libaio.h> | |
12 | #endif | |
13 | ||
4b9e13dc JA |
14 | #ifdef CONFIG_LIBNUMA |
15 | #include <numa.h> | |
16 | #endif | |
17 | ||
c9fb4c5b JA |
18 | #include <sys/types.h> |
19 | #include <sys/stat.h> | |
20 | #include <sys/ioctl.h> | |
21 | #include <sys/syscall.h> | |
22 | #include <sys/resource.h> | |
c3e2fc25 | 23 | #include <sys/mman.h> |
e31b8288 | 24 | #include <sys/uio.h> |
c9fb4c5b JA |
25 | #include <linux/fs.h> |
26 | #include <fcntl.h> | |
27 | #include <unistd.h> | |
c9fb4c5b JA |
28 | #include <string.h> |
29 | #include <pthread.h> | |
30 | #include <sched.h> | |
31 | ||
74efb029 | 32 | #include "../arch/arch.h" |
a5a2429e | 33 | #include "../os/os.h" |
57fa61f0 | 34 | #include "../lib/types.h" |
932131c9 | 35 | #include "../lib/roundup.h" |
9eff5320 | 36 | #include "../lib/rand.h" |
932131c9 | 37 | #include "../minmax.h" |
f3e769a4 | 38 | #include "../os/linux/io_uring.h" |
7d04588a | 39 | #include "../engines/nvme.h" |
ac122fea | 40 | |
/* Mapped view of the kernel's submission queue (SQ) ring. */
struct io_sq_ring {
	unsigned *head;		/* consumer index (advanced by the kernel) */
	unsigned *tail;		/* producer index (advanced by us) */
	unsigned *ring_mask;	/* entries - 1, for cheap index wrapping */
	unsigned *ring_entries;	/* number of slots in the ring */
	unsigned *flags;	/* kernel-set flags (e.g. SQ thread state) */
	unsigned *array;	/* indirection array into the sqe array */
};
49 | ||
/* Mapped view of the kernel's completion queue (CQ) ring. */
struct io_cq_ring {
	unsigned *head;		/* consumer index (advanced by us) */
	unsigned *tail;		/* producer index (advanced by the kernel) */
	unsigned *ring_mask;	/* entries - 1, for cheap index wrapping */
	unsigned *ring_entries;	/* number of slots in the ring */
	struct io_uring_cqe *cqes;	/* completion entries, part of the mapping */
};
57 | ||
/* Compile-time defaults; each has a runtime option counterpart below. */
#define DEPTH			128	/* default queue depth */
#define BATCH_SUBMIT		32	/* sqes submitted per io_uring_enter() */
#define BATCH_COMPLETE		32	/* cqes reaped per call */
#define BS			4096	/* default block size, bytes */

#define MAX_FDS			16	/* max files/devices per submitter */

/* Cached copies of *ring_mask from the mmap'ed SQ/CQ rings */
static unsigned sq_ring_mask, cq_ring_mask;
e39c34dc | 66 | |
a7086591 JA |
/* Per-file/device state for one target being read. */
struct file {
	unsigned long max_blocks;	/* size in units of bs */
	unsigned long max_size;		/* size in bytes */
	unsigned long cur_off;		/* next offset for sequential IO */
	unsigned pending_ios;		/* IOs in flight against this file */
	unsigned int nsid;		/* nsid field required for nvme-passthrough */
	unsigned int lba_shift;		/* lba_shift field required for nvme-passthrough */
	int real_fd;			/* fd from open(2) */
	int fixed_fd;			/* index when registered via IORING_REGISTER_FILES */
	int fileno;			/* index of this file in the submitter */
};
78 | ||
932131c9 JA |
/*
 * Latency histogram layout: PLAT_GROUP_NR groups of PLAT_VAL buckets,
 * with log2-spaced group resolution (see plat_val_to_idx()).
 */
#define PLAT_BITS	6
#define PLAT_VAL	(1 << PLAT_BITS)
#define PLAT_GROUP_NR	29
#define PLAT_NR		(PLAT_GROUP_NR * PLAT_VAL)
83 | ||
c9fb4c5b JA |
/* Per-thread state: ring fds, mapped rings, stats, and target files. */
struct submitter {
	pthread_t thread;
	int ring_fd;			/* fd from io_uring_setup() */
	int enter_ring_fd;		/* fd used by io_uring_enter(); may be a registered-ring handle */
	int index;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;	/* mmap'ed sqe array */
	struct io_cq_ring cq_ring;
	int inflight;			/* submitted but not yet reaped */
	int tid;
	unsigned long reaps;
	unsigned long done;
	unsigned long calls;		/* io_uring_enter() invocations */
	volatile int finish;		/* set to stop this submitter */

	__s32 *fds;			/* fd table passed to IORING_REGISTER_FILES */

	struct taus258_state rand_state;	/* per-thread RNG for random offsets */

	unsigned long *clock_batch;	/* pre-sampled clock values for latency stats */
	int clock_index;		/* next slot in clock_batch; 0 means "no stamp" */
	unsigned long *plat;		/* latency histogram, PLAT_NR buckets */

#ifdef CONFIG_LIBAIO
	io_context_t aio_ctx;
#endif

	int numa_node;			/* node to bind to, or -1 for none */
	const char *filename;

	struct file files[MAX_FDS];
	unsigned nr_files;
	unsigned cur_file;		/* round-robin cursor over files */
	struct iovec iovecs[];		/* flexible array, one iovec per queue slot */
};
119 | ||
static struct submitter *submitter;
static volatile int finish;
static int stats_running;
static unsigned long max_iops;
static long t_io_uring_page_size;

/* Runtime options (command-line tunable) */
static int depth = DEPTH;
static int batch_submit = BATCH_SUBMIT;
static int batch_complete = BATCH_COMPLETE;
static int bs = BS;
static int polled = 1;		/* use IO polling */
static int fixedbufs = 1;	/* use fixed user buffers */
static int dma_map;		/* pre-map DMA buffers */
static int register_files = 1;	/* use fixed files */
static int buffered = 0;	/* use buffered IO, not O_DIRECT */
static int sq_thread_poll = 0;	/* use kernel submission/poller thread */
static int sq_thread_cpu = -1;	/* pin above thread to this CPU */
static int do_nop = 0;		/* no-op SQ ring commands */
static int nthreads = 1;
static int stats = 0;		/* generate IO stats */
static int aio = 0;		/* use libaio */
static int runtime = 0;		/* runtime */
static int random_io = 1;	/* random or sequential IO */
static int register_ring = 1;	/* register ring */
static int use_sync = 0;	/* use preadv2 */
static int numa_placement = 0;	/* set to node of device */
static int pt = 0;		/* passthrough I/O or not */

static unsigned long tsc_rate;	/* TSC ticks per second; 0 = uncalibrated */

#define TSC_RATE_FILE	"tsc-rate"

static int vectored = 1;	/* IORING_OP_READV vs IORING_OP_READ */

/* Percentiles reported by the latency stats */
static float plist[] = { 1.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0,
			80.0, 90.0, 95.0, 99.0, 99.5, 99.9, 99.95, 99.99 };
static int plist_len = 17;

/* Fallback definitions for kernel headers lacking this register opcode */
#ifndef IORING_REGISTER_MAP_BUFFERS
#define IORING_REGISTER_MAP_BUFFERS	22
struct io_uring_map_buffers {
	__s32	fd;
	__u32	buf_start;
	__u32	buf_end;
	__u32	flags;
	__u64	rsvd[2];
};
#endif
168 | ||
7d04588a AG |
/*
 * Issue an NVMe identify admin command through the passthrough ioctl.
 * 'data' must point to at least NVME_IDENTIFY_DATA_SIZE bytes.
 * Returns the ioctl(2) result: 0 on success, nonzero/-1 otherwise.
 */
static int nvme_identify(int fd, __u32 nsid, enum nvme_identify_cns cns,
			 enum nvme_csi csi, void *data)
{
	struct nvme_passthru_cmd cmd = {
		.opcode         = nvme_admin_identify,
		.nsid           = nsid,
		.addr           = (__u64)(uintptr_t)data,
		.data_len       = NVME_IDENTIFY_DATA_SIZE,
		.cdw10          = cns,
		.cdw11          = csi << NVME_IDENTIFY_CSI_SHIFT,
		.timeout_ms     = NVME_DEFAULT_IOCTL_TIMEOUT,
	};

	return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
}
184 | ||
185 | static int nvme_get_info(int fd, __u32 *nsid, __u32 *lba_sz, __u64 *nlba) | |
186 | { | |
187 | struct nvme_id_ns ns; | |
188 | int namespace_id; | |
189 | int err; | |
190 | ||
191 | namespace_id = ioctl(fd, NVME_IOCTL_ID); | |
192 | if (namespace_id < 0) { | |
193 | fprintf(stderr, "error failed to fetch namespace-id\n"); | |
194 | close(fd); | |
195 | return -errno; | |
196 | } | |
197 | ||
198 | /* | |
199 | * Identify namespace to get namespace-id, namespace size in LBA's | |
200 | * and LBA data size. | |
201 | */ | |
202 | err = nvme_identify(fd, namespace_id, NVME_IDENTIFY_CNS_NS, | |
203 | NVME_CSI_NVM, &ns); | |
204 | if (err) { | |
205 | fprintf(stderr, "error failed to fetch identify namespace\n"); | |
206 | close(fd); | |
207 | return err; | |
208 | } | |
209 | ||
210 | *nsid = namespace_id; | |
211 | *lba_sz = 1 << ns.lbaf[(ns.flbas & 0x0f)].ds; | |
212 | *nlba = ns.nsze; | |
213 | ||
214 | return 0; | |
215 | } | |
216 | ||
932131c9 JA |
217 | static unsigned long cycles_to_nsec(unsigned long cycles) |
218 | { | |
219 | uint64_t val; | |
220 | ||
221 | if (!tsc_rate) | |
222 | return cycles; | |
223 | ||
224 | val = cycles * 1000000000ULL; | |
225 | return val / tsc_rate; | |
226 | } | |
227 | ||
/*
 * Inverse of plat_val_to_idx(): map a histogram bucket index to a
 * representative latency, converted to nsec. For grouped buckets the
 * mean of the bucket's value range is returned.
 */
static unsigned long plat_idx_to_val(unsigned int idx)
{
	unsigned int error_bits;
	unsigned long k, base;

	assert(idx < PLAT_NR);

	/* MSB <= (PLAT_BITS-1), cannot be rounded off. Use
	 * all bits of the sample as index */
	if (idx < (PLAT_VAL << 1))
		return cycles_to_nsec(idx);

	/* Find the group and compute the minimum value of that group */
	error_bits = (idx >> PLAT_BITS) - 1;
	base = ((unsigned long) 1) << (error_bits + PLAT_BITS);

	/* Find its bucket number of the group */
	k = idx % PLAT_VAL;

	/* Return the mean of the range of the bucket */
	return cycles_to_nsec(base + ((k + 0.5) * (1 << error_bits)));
}
250 | ||
c409e4c2 AG |
/*
 * Convert the latency histogram io_u_plat (nr samples total) into the
 * percentile values listed in plist. *output receives a malloc'ed array
 * of plist_len values (caller frees); *maxv/*minv receive the extreme
 * percentile values. Returns the number of entries, 0 on alloc failure.
 */
unsigned int calculate_clat_percentiles(unsigned long *io_u_plat,
		unsigned long nr, unsigned long **output,
		unsigned long *maxv, unsigned long *minv)
{
	unsigned long sum = 0;
	unsigned int len = plist_len, i, j = 0;
	unsigned long *ovals = NULL;
	bool is_last;

	*minv = -1UL;
	*maxv = 0;

	ovals = malloc(len * sizeof(*ovals));
	if (!ovals)
		return 0;

	/*
	 * Calculate bucket values, note down max and min values
	 */
	is_last = false;
	for (i = 0; i < PLAT_NR && !is_last; i++) {
		sum += io_u_plat[i];
		/* emit every percentile the cumulative sum has now crossed */
		while (sum >= ((long double) plist[j] / 100.0 * nr)) {
			assert(plist[j] <= 100.0);

			ovals[j] = plat_idx_to_val(i);
			if (ovals[j] < *minv)
				*minv = ovals[j];
			if (ovals[j] > *maxv)
				*maxv = ovals[j];

			is_last = (j == len - 1) != 0;
			if (is_last)
				break;

			j++;
		}
	}

	if (!is_last)
		fprintf(stderr, "error calculating latency percentiles\n");

	*output = ovals;
	return len;
}
296 | ||
/*
 * Print the percentile list for histogram io_u_plat (nr samples),
 * scaling values to usec when they are large enough, a few entries
 * per line.
 */
static void show_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
				  unsigned int precision)
{
	unsigned int divisor, len, i, j = 0;
	unsigned long minv, maxv;
	unsigned long *ovals;
	int per_line, scale_down, time_width;
	bool is_last;
	char fmt[32];

	len = calculate_clat_percentiles(io_u_plat, nr, &ovals, &maxv, &minv);
	if (!len || !ovals)
		goto out;

	if (!tsc_rate) {
		/* no TSC calibration: values are raw ticks */
		scale_down = 0;
		divisor = 1;
		printf(" percentiles (tsc ticks):\n |");
	} else if (minv > 2000 && maxv > 99999) {
		scale_down = 1;
		divisor = 1000;
		printf(" percentiles (usec):\n |");
	} else {
		scale_down = 0;
		divisor = 1;
		printf(" percentiles (nsec):\n |");
	}

	/* column width: at least 5, or enough digits for the largest value */
	time_width = max(5, (int) (log10(maxv / divisor) + 1));
	snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
			precision, time_width);
	/* fmt will be something like " %5.2fth=[%4llu]%c" */
	per_line = (80 - 7) / (precision + 10 + time_width);

	for (j = 0; j < len; j++) {
		/* for formatting */
		if (j != 0 && (j % per_line) == 0)
			printf(" |");

		/* end of the list */
		is_last = (j == len - 1) != 0;

		/* scale_down is 0 or 1, so this rounds up to usec at most once */
		for (i = 0; i < scale_down; i++)
			ovals[j] = (ovals[j] + 999) / 1000;

		printf(fmt, plist[j], ovals[j], is_last ? '\n' : ',');

		if (is_last)
			break;

		if ((j % per_line) == per_line - 1) /* for formatting */
			printf("\n");
	}

out:
	free(ovals);
}
354 | ||
#ifdef ARCH_HAVE_CPU_CLOCK
/*
 * Map a latency sample (in cycles) to its histogram bucket index.
 * Small values index directly; larger values are grouped log2-style,
 * discarding low-order "error" bits within each group so resolution
 * degrades gracefully for big samples.
 */
static unsigned int plat_val_to_idx(unsigned long val)
{
	unsigned int msb, error_bits, base, offset;

	/* Position of the most significant set bit (0 for val == 0) */
	msb = val ? (sizeof(val) * 8) - __builtin_clzll(val) - 1 : 0;

	/* Small samples map 1:1 onto the first buckets */
	if (msb <= PLAT_BITS)
		return val;

	/* Low-order bits below this group's resolution */
	error_bits = msb - PLAT_BITS;

	/* First bucket of the sample's group */
	base = (error_bits + 1) << PLAT_BITS;

	/* Bucket within the group, after dropping the error bits */
	offset = (val >> error_bits) & (PLAT_VAL - 1);

	/* Clamp to the last bucket */
	if (base + offset < PLAT_NR - 1)
		return base + offset;
	return PLAT_NR - 1;
}
#endif
932131c9 JA |
392 | |
/*
 * Account 'nr' completions against the submit timestamp stored at
 * clock_batch[clock_index], bucketing the cycle delta into the latency
 * histogram. clock_index 0 is treated as "no timestamp" and skipped.
 * Compiled to a no-op without a CPU clock.
 */
static void add_stat(struct submitter *s, int clock_index, int nr)
{
#ifdef ARCH_HAVE_CPU_CLOCK
	unsigned long cycles;
	unsigned int pidx;

	if (!s->finish && clock_index) {
		cycles = get_cpu_clock();
		cycles -= s->clock_batch[clock_index];
		pidx = plat_val_to_idx(cycles);
		s->plat[pidx] += nr;
	}
#endif
}
407 | ||
a71ad043 JA |
/*
 * Ask the kernel to pre-map registered buffers 0..depth-1 for DMA
 * against the first file's device (out-of-tree
 * IORING_REGISTER_MAP_BUFFERS opcode). Only file 0 is covered, hence
 * the warning for multi-file runs. Returns the register syscall result.
 */
static int io_uring_map_buffers(struct submitter *s)
{
	struct io_uring_map_buffers map = {
		.fd = s->files[0].real_fd,
		.buf_end = depth,
	};

	if (do_nop)
		return 0;
	if (s->nr_files > 1)
		fprintf(stdout, "Mapping buffers may not work with multiple files\n");

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_MAP_BUFFERS, &map, 1);
}
423 | ||
/*
 * Register the submitter's iovec buffers so sqes can use
 * IORING_OP_READ_FIXED. The count is rounded to a power of two —
 * presumably matching how the iovec array was sized at allocation
 * (allocation is outside this view; confirm).
 */
static int io_uring_register_buffers(struct submitter *s)
{
	if (do_nop)
		return 0;

	return syscall(__NR_io_uring_register, s->ring_fd,
			IORING_REGISTER_BUFFERS, s->iovecs, roundup_pow2(depth));
}
432 | ||
a7abc9fb JA |
433 | static int io_uring_register_files(struct submitter *s) |
434 | { | |
48e698fa | 435 | int i; |
a7abc9fb | 436 | |
8025517d JA |
437 | if (do_nop) |
438 | return 0; | |
439 | ||
48e698fa JA |
440 | s->fds = calloc(s->nr_files, sizeof(__s32)); |
441 | for (i = 0; i < s->nr_files; i++) { | |
442 | s->fds[i] = s->files[i].real_fd; | |
443 | s->files[i].fixed_fd = i; | |
444 | } | |
a7abc9fb | 445 | |
bfed648c | 446 | return syscall(__NR_io_uring_register, s->ring_fd, |
919850d2 | 447 | IORING_REGISTER_FILES, s->fds, s->nr_files); |
a7abc9fb JA |
448 | } |
449 | ||
2ea53ca3 JA |
450 | static int io_uring_setup(unsigned entries, struct io_uring_params *p) |
451 | { | |
2be18f6b JA |
452 | int ret; |
453 | ||
1db268db JA |
454 | /* |
455 | * Clamp CQ ring size at our SQ ring size, we don't need more entries | |
456 | * than that. | |
457 | */ | |
458 | p->flags |= IORING_SETUP_CQSIZE; | |
459 | p->cq_entries = entries; | |
460 | ||
2be18f6b JA |
461 | p->flags |= IORING_SETUP_COOP_TASKRUN; |
462 | p->flags |= IORING_SETUP_SINGLE_ISSUER; | |
463 | p->flags |= IORING_SETUP_DEFER_TASKRUN; | |
464 | retry: | |
465 | ret = syscall(__NR_io_uring_setup, entries, p); | |
466 | if (!ret) | |
467 | return 0; | |
468 | ||
469 | if (errno == EINVAL && p->flags & IORING_SETUP_COOP_TASKRUN) { | |
470 | p->flags &= ~IORING_SETUP_COOP_TASKRUN; | |
471 | goto retry; | |
472 | } | |
473 | if (errno == EINVAL && p->flags & IORING_SETUP_SINGLE_ISSUER) { | |
474 | p->flags &= ~IORING_SETUP_SINGLE_ISSUER; | |
475 | goto retry; | |
476 | } | |
477 | if (errno == EINVAL && p->flags & IORING_SETUP_DEFER_TASKRUN) { | |
478 | p->flags &= ~IORING_SETUP_DEFER_TASKRUN; | |
479 | goto retry; | |
480 | } | |
481 | ||
482 | return ret; | |
c9fb4c5b JA |
483 | } |
484 | ||
b3915995 JA |
485 | static void io_uring_probe(int fd) |
486 | { | |
487 | struct io_uring_probe *p; | |
488 | int ret; | |
489 | ||
490 | p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op)); | |
491 | if (!p) | |
492 | return; | |
493 | ||
494 | memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op)); | |
495 | ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256); | |
496 | if (ret < 0) | |
497 | goto out; | |
498 | ||
499 | if (IORING_OP_READ > p->ops_len) | |
500 | goto out; | |
501 | ||
502 | if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED)) | |
84106576 | 503 | vectored = 0; |
b3915995 JA |
504 | out: |
505 | free(p); | |
506 | } | |
507 | ||
c3e2fc25 JA |
/*
 * Thin io_uring_enter(2) wrapper: adds the registered-ring flag when
 * enabled and uses the arch-optimized raw syscall path when available.
 */
static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	if (register_ring)
		flags |= IORING_ENTER_REGISTERED_RING;
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
				min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#endif
}
521 | ||
701d1277 JA |
522 | static unsigned file_depth(struct submitter *s) |
523 | { | |
e39863e3 | 524 | return (depth + s->nr_files - 1) / s->nr_files; |
701d1277 JA |
525 | } |
526 | ||
501565a1 JA |
/*
 * Pick the next IO offset (bytes) for file f: a uniformly random block
 * for random IO, else sequential with wrap before max_size.
 * NOTE(review): random mode uses "r % (max_blocks - 1)", so the final
 * block is never selected and a one-block file would divide by zero —
 * assumes targets are larger than one block; confirm with callers.
 */
static unsigned long long get_offset(struct submitter *s, struct file *f)
{
	unsigned long long offset;
	long r;

	if (random_io) {
		r = __rand64(&s->rand_state);
		offset = (r % (f->max_blocks - 1)) * bs;
	} else {
		offset = f->cur_off;
		f->cur_off += bs;
		if (f->cur_off + bs > f->max_size)
			f->cur_off = 0;
	}

	return offset;
}
544 | ||
/*
 * Fill the sqe at 'index' with the next read: pick a file (round-robin
 * advances once a file reaches its depth budget), select fixed vs plain
 * vs vectored read per the fixedbufs/vectored options, and encode the
 * file number — plus the clock index when stats are running — into
 * user_data for the reaper.
 */
static void init_io(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index];
	struct file *f;

	if (do_nop) {
		sqe->opcode = IORING_OP_NOP;
		return;
	}

	if (s->nr_files == 1) {
		f = &s->files[0];
	} else {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			s->cur_file++;
			if (s->cur_file == s->nr_files)
				s->cur_file = 0;
			f = &s->files[s->cur_file];
		}
	}
	f->pending_ios++;

	if (register_files) {
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->fixed_fd;
	} else {
		sqe->flags = 0;
		sqe->fd = f->real_fd;
	}
	if (fixedbufs) {
		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = index;
	} else if (!vectored) {
		sqe->opcode = IORING_OP_READ;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = 0;
	} else {
		sqe->opcode = IORING_OP_READV;
		sqe->addr = (unsigned long) &s->iovecs[index];
		sqe->len = 1;
		sqe->buf_index = 0;
	}
	sqe->ioprio = 0;
	sqe->off = get_offset(s, f);
	/* low 32 bits: file number; high 32 bits: submit clock index */
	sqe->user_data = (unsigned long) f->fileno;
	if (stats && stats_running)
		sqe->user_data |= ((uint64_t)s->clock_index << 32);
}
597 | ||
7d04588a AG |
/*
 * Passthrough variant of init_io(): builds an NVMe read command inside
 * a 128-byte sqe — hence 'index << 1' into the sqe array, which was
 * mmap'ed double-size under IORING_SETUP_SQE128. Byte offsets are
 * converted to LBAs via the file's lba_shift.
 */
static void init_io_pt(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index << 1];
	unsigned long offset;
	struct file *f;
	struct nvme_uring_cmd *cmd;
	unsigned long long slba;
	unsigned long long nlb;
	long r;

	if (s->nr_files == 1) {
		f = &s->files[0];
	} else {
		f = &s->files[s->cur_file];
		if (f->pending_ios >= file_depth(s)) {
			s->cur_file++;
			if (s->cur_file == s->nr_files)
				s->cur_file = 0;
			f = &s->files[s->cur_file];
		}
	}
	f->pending_ios++;

	/* same offset selection as get_offset(), inlined here */
	if (random_io) {
		r = __rand64(&s->rand_state);
		offset = (r % (f->max_blocks - 1)) * bs;
	} else {
		offset = f->cur_off;
		f->cur_off += bs;
		if (f->cur_off + bs > f->max_size)
			f->cur_off = 0;
	}

	if (register_files) {
		sqe->fd = f->fixed_fd;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->real_fd;
		sqe->flags = 0;
	}
	sqe->opcode = IORING_OP_URING_CMD;
	/* low 32 bits: file number; high 32 bits: submit clock index */
	sqe->user_data = (unsigned long) f->fileno;
	if (stats)
		sqe->user_data |= ((__u64) s->clock_index << 32ULL);
	sqe->cmd_op = NVME_URING_CMD_IO;
	slba = offset >> f->lba_shift;
	nlb = (bs >> f->lba_shift) - 1;	/* NVMe LBA counts are zero-based */
	cmd = (struct nvme_uring_cmd *)&sqe->cmd;
	/* cdw10 and cdw11 represent starting slba*/
	cmd->cdw10 = slba & 0xffffffff;
	cmd->cdw11 = slba >> 32;
	/* cdw12 represent number of lba to be read*/
	cmd->cdw12 = nlb;
	cmd->addr = (unsigned long) s->iovecs[index].iov_base;
	cmd->data_len = bs;
	if (fixedbufs) {
		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = index;
	}
	cmd->nsid = f->nsid;
	cmd->opcode = 2;	/* NVMe read */
}
660 | ||
/*
 * Prepare up to max_ios new sqes at the current sq tail, stopping early
 * if the ring fills. The new tail is published with release ordering so
 * the kernel only sees fully-initialized sqes. Returns the count prepped.
 */
static int prep_more_ios_uring(struct submitter *s, int max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;
	unsigned int head = atomic_load_acquire(ring->head);

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		if (next_tail == head)	/* ring full */
			break;

		index = tail & sq_ring_mask;
		if (pt)
			init_io_pt(s, index);
		else
			init_io(s, index);
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (prepped)
		atomic_store_release(ring->tail, tail);
	return prepped;
}
686 | ||
/*
 * Fill in f->max_blocks/max_size — and NVMe geometry for passthrough —
 * from the fd. Returns 0 on success, -1 on error or unsupported type.
 */
static int get_file_size(struct file *f)
{
	struct stat st;

	if (fstat(f->real_fd, &st) < 0)
		return -1;
	if (pt) {
		__u64 nlba;
		__u32 lbs;
		int ret;

		if (!S_ISCHR(st.st_mode)) {
			fprintf(stderr, "passthrough works with only nvme-ns "
					"generic devices (/dev/ngXnY)\n");
			return -1;
		}
		ret = nvme_get_info(f->real_fd, &f->nsid, &lbs, &nlba);
		if (ret)
			return -1;
		if ((bs % lbs) != 0) {
			printf("error: bs:%d should be a multiple logical_block_size:%d\n",
					bs, lbs);
			return -1;
		}
		/*
		 * NOTE(review): nlba counts LBAs while bs is in bytes, so
		 * "nlba / bs" looks unit-mismatched — confirm the intended
		 * sizing (vs. nlba * lbs / bs).
		 */
		f->max_blocks = nlba / bs;
		f->max_size = nlba;
		f->lba_shift = ilog2(lbs);
		return 0;
	} else if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
			return -1;

		f->max_blocks = bytes / bs;
		f->max_size = bytes;
		return 0;
	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / bs;
		f->max_size = st.st_size;
		return 0;
	}

	return -1;
}
732 | ||
/*
 * Drain completed cqes: check each read returned exactly bs bytes, drop
 * the owning file's pending count, and batch latency samples that share
 * a clock index into one add_stat() call. Returns the number reaped, or
 * -1 on an IO error.
 */
static int reap_events_uring(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;
	int last_idx = -1, stat_nr = 0;

	head = *ring->head;
	do {
		struct file *f;

		read_barrier();
		if (head == atomic_load_acquire(ring->tail))
			break;
		cqe = &ring->cqes[head & cq_ring_mask];
		if (!do_nop) {
			/* low 32 bits of user_data carry the file number */
			int fileno = cqe->user_data & 0xffffffff;

			f = &s->files[fileno];
			f->pending_ios--;
			if (cqe->res != bs) {
				printf("io: unexpected ret=%d\n", cqe->res);
				if (polled && cqe->res == -EOPNOTSUPP)
					printf("Your filesystem/driver/kernel doesn't support polled IO\n");
				return -1;
			}
		}
		if (stats) {
			/* high 32 bits carry the submit-side clock index */
			int clock_index = cqe->user_data >> 32;

			/* coalesce consecutive completions with the same stamp */
			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
					stat_nr = 0;
				}
				last_idx = clock_index;
			}
			stat_nr++;
		}
		reaped++;
		head++;
	} while (1);

	if (stat_nr)
		add_stat(s, last_idx, stat_nr);

	if (reaped) {
		s->inflight -= reaped;
		atomic_store_release(ring->head, head);
	}
	return reaped;
}
785 | ||
7d04588a AG |
/*
 * Passthrough variant of reap_events_uring(): cqes are 32 bytes under
 * IORING_SETUP_CQE32 (hence 'index << 1'), and a successful NVMe
 * command completes with res == 0 rather than the byte count.
 */
static int reap_events_uring_pt(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;
	int last_idx = -1, stat_nr = 0;
	unsigned index;
	int fileno;

	head = *ring->head;
	do {
		struct file *f;

		read_barrier();
		if (head == atomic_load_acquire(ring->tail))
			break;
		index = head & cq_ring_mask;
		cqe = &ring->cqes[index << 1];
		/* low 32 bits of user_data carry the file number */
		fileno = cqe->user_data & 0xffffffff;
		f = &s->files[fileno];
		f->pending_ios--;

		if (cqe->res != 0) {
			printf("io: unexpected ret=%d\n", cqe->res);
			if (polled && cqe->res == -EINVAL)
				printf("passthrough doesn't support polled IO\n");
			return -1;
		}
		if (stats) {
			/* high 32 bits carry the submit-side clock index */
			int clock_index = cqe->user_data >> 32;

			/* coalesce consecutive completions with the same stamp */
			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
					stat_nr = 0;
				}
				last_idx = clock_index;
			}
			stat_nr++;
		}
		reaped++;
		head++;
	} while (1);

	if (stat_nr)
		add_stat(s, last_idx, stat_nr);

	if (reaped) {
		s->inflight -= reaped;
		atomic_store_release(ring->head, head);
	}
	return reaped;
}
839 | ||
4b9e13dc JA |
/*
 * Bind the submitter to its detected NUMA node: prefer memory from that
 * node and restrict the thread to the node's CPUs. No-op without
 * libnuma or when no node was detected (numa_node == -1).
 */
static void set_affinity(struct submitter *s)
{
#ifdef CONFIG_LIBNUMA
	struct bitmask *mask;

	if (s->numa_node == -1)
		return;

	numa_set_preferred(s->numa_node);

	mask = numa_allocate_cpumask();
	numa_node_to_cpus(s->numa_node, mask);
	numa_sched_setaffinity(s->tid, mask);
#endif
}
855 | ||
856 | static int detect_node(struct submitter *s, const char *name) | |
857 | { | |
858 | #ifdef CONFIG_LIBNUMA | |
859 | const char *base = basename(name); | |
860 | char str[128]; | |
861 | int ret, fd, node; | |
862 | ||
cc791a99 JA |
863 | if (pt) |
864 | sprintf(str, "/sys/class/nvme-generic/%s/device/numa_node", base); | |
865 | else | |
866 | sprintf(str, "/sys/block/%s/device/numa_node", base); | |
4b9e13dc JA |
867 | fd = open(str, O_RDONLY); |
868 | if (fd < 0) | |
869 | return -1; | |
870 | ||
871 | ret = read(fd, str, sizeof(str)); | |
872 | if (ret < 0) { | |
873 | close(fd); | |
874 | return -1; | |
875 | } | |
876 | node = atoi(str); | |
877 | s->numa_node = node; | |
878 | close(fd); | |
879 | #else | |
880 | s->numa_node = -1; | |
881 | #endif | |
882 | return 0; | |
883 | } | |
884 | ||
/*
 * Configure a submitter for legacy libaio. io_uring features that aio
 * lacks (polling, SQPOLL, nops, registered files/buffers) are warned
 * about and disabled. Returns io_queue_init()'s result, or -1 with
 * errno = EINVAL when built without libaio.
 */
static int setup_aio(struct submitter *s)
{
#ifdef CONFIG_LIBAIO
	if (polled) {
		fprintf(stderr, "aio does not support polled IO\n");
		polled = 0;
	}
	if (sq_thread_poll) {
		fprintf(stderr, "aio does not support SQPOLL IO\n");
		sq_thread_poll = 0;
	}
	if (do_nop) {
		/* message previously copy-pasted from the polled case */
		fprintf(stderr, "aio does not support nop IO\n");
		do_nop = 0;
	}
	if (fixedbufs || register_files) {
		fprintf(stderr, "aio does not support registered files or buffers\n");
		fixedbufs = register_files = 0;
	}

	return io_queue_init(roundup_pow2(depth), &s->aio_ctx);
#else
	fprintf(stderr, "Legacy AIO not available on this system/build\n");
	errno = EINVAL;
	return -1;
#endif
}
912 | ||
/*
 * Create and map an io_uring instance for this submitter.
 *
 * Builds the setup flags from the global options (IOPOLL for polled IO,
 * SQPOLL with optional CPU affinity, 128-byte SQEs / 32-byte CQEs for
 * NVMe passthrough), optionally registers fixed buffers and files, then
 * mmaps the SQ ring, the SQE array and the CQ ring into our address
 * space. Returns 0 on success, 1 on failure.
 */
static int setup_ring(struct submitter *s)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;
	int ret, fd, i;
	void *ptr;
	size_t len;

	memset(&p, 0, sizeof(p));

	if (polled && !do_nop)
		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;
		}
	}
	/* passthrough commands need the big SQE/CQE formats */
	if (pt) {
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;
	}

	fd = io_uring_setup(depth, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	s->ring_fd = s->enter_ring_fd = fd;

	io_uring_probe(fd);

	if (fixedbufs) {
		struct rlimit rlim;

		rlim.rlim_cur = RLIM_INFINITY;
		rlim.rlim_max = RLIM_INFINITY;
		/* ignore potential error, not needed on newer kernels */
		setrlimit(RLIMIT_MEMLOCK, &rlim);

		ret = io_uring_register_buffers(s);
		if (ret < 0) {
			perror("io_uring_register_buffers");
			return 1;
		}

		if (dma_map) {
			ret = io_uring_map_buffers(s);
			if (ret < 0) {
				perror("io_uring_map_buffers");
				return 1;
			}
		}
	}

	if (register_files) {
		ret = io_uring_register_files(s);
		if (ret < 0) {
			perror("io_uring_register_files");
			return 1;
		}
	}

	/* map the SQ ring and resolve the kernel-provided offsets */
	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQ_RING);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	/* SQE128 doubles the size of each SQE slot */
	if (p.flags & IORING_SETUP_SQE128)
		len = 2 * p.sq_entries * sizeof(struct io_uring_sqe);
	else
		len = p.sq_entries * sizeof(struct io_uring_sqe);
	s->sqes = mmap(0, len,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);

	/* CQE32 doubles the size of each CQE slot */
	if (p.flags & IORING_SETUP_CQE32) {
		len = p.cq_off.cqes +
			2 * p.cq_entries * sizeof(struct io_uring_cqe);
	} else {
		len = p.cq_off.cqes +
			p.cq_entries * sizeof(struct io_uring_cqe);
	}
	ptr = mmap(0, len,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;

	/* identity-map SQ array slot i -> SQE i; never changed afterwards */
	for (i = 0; i < p.sq_entries; i++)
		sring->array[i] = i;

	return 0;
}
1019 | ||
1020 | static void *allocate_mem(struct submitter *s, int size) | |
1021 | { | |
1022 | void *buf; | |
1023 | ||
1024 | #ifdef CONFIG_LIBNUMA | |
1025 | if (s->numa_node != -1) | |
1026 | return numa_alloc_onnode(size, s->numa_node); | |
1027 | #endif | |
1028 | ||
c409e4c2 | 1029 | if (posix_memalign(&buf, t_io_uring_page_size, bs)) { |
4b9e13dc JA |
1030 | printf("failed alloc\n"); |
1031 | return NULL; | |
1032 | } | |
1033 | ||
1034 | return buf; | |
1035 | } | |
1036 | ||
256714ea | 1037 | static int submitter_init(struct submitter *s) |
c9fb4c5b | 1038 | { |
4b9e13dc JA |
1039 | int i, nr_batch, err; |
1040 | static int init_printed; | |
1041 | char buf[80]; | |
932131c9 | 1042 | s->tid = gettid(); |
4b9e13dc JA |
1043 | printf("submitter=%d, tid=%d, file=%s, node=%d\n", s->index, s->tid, |
1044 | s->filename, s->numa_node); | |
1045 | ||
1046 | set_affinity(s); | |
c9fb4c5b | 1047 | |
6243766b KR |
1048 | __init_rand64(&s->rand_state, s->tid); |
1049 | srand48(s->tid); | |
c9fb4c5b | 1050 | |
932131c9 JA |
1051 | for (i = 0; i < MAX_FDS; i++) |
1052 | s->files[i].fileno = i; | |
1053 | ||
4b9e13dc JA |
1054 | for (i = 0; i < roundup_pow2(depth); i++) { |
1055 | void *buf; | |
1056 | ||
1057 | buf = allocate_mem(s, bs); | |
1058 | if (!buf) | |
1059 | return 1; | |
1060 | s->iovecs[i].iov_base = buf; | |
1061 | s->iovecs[i].iov_len = bs; | |
1062 | } | |
1063 | ||
1064 | if (use_sync) { | |
1065 | sprintf(buf, "Engine=preadv2\n"); | |
1066 | err = 0; | |
1067 | } else if (!aio) { | |
1068 | err = setup_ring(s); | |
1069 | sprintf(buf, "Engine=io_uring, sq_ring=%d, cq_ring=%d\n", *s->sq_ring.ring_entries, *s->cq_ring.ring_entries); | |
1070 | } else { | |
1071 | sprintf(buf, "Engine=aio\n"); | |
1072 | err = setup_aio(s); | |
1073 | } | |
1074 | if (err) { | |
1075 | printf("queue setup failed: %s, %d\n", strerror(errno), err); | |
1076 | return 1; | |
1077 | } | |
1078 | ||
1079 | if (!init_printed) { | |
1080 | printf("polled=%d, fixedbufs=%d/%d, register_files=%d, buffered=%d, QD=%d\n", polled, fixedbufs, dma_map, register_files, buffered, depth); | |
1081 | printf("%s", buf); | |
1082 | init_printed = 1; | |
1083 | } | |
1084 | ||
932131c9 JA |
1085 | if (stats) { |
1086 | nr_batch = roundup_pow2(depth / batch_submit); | |
d4af2ece JA |
1087 | if (nr_batch < 2) |
1088 | nr_batch = 2; | |
932131c9 | 1089 | s->clock_batch = calloc(nr_batch, sizeof(unsigned long)); |
52479d8b | 1090 | s->clock_index = 1; |
932131c9 JA |
1091 | |
1092 | s->plat = calloc(PLAT_NR, sizeof(unsigned long)); | |
1093 | } else { | |
1094 | s->clock_batch = NULL; | |
1095 | s->plat = NULL; | |
1096 | nr_batch = 0; | |
1097 | } | |
7d04588a AG |
1098 | /* perform the expensive command initialization part for passthrough here |
1099 | * rather than in the fast path | |
1100 | */ | |
1101 | if (pt) { | |
1102 | for (i = 0; i < roundup_pow2(depth); i++) { | |
1103 | struct io_uring_sqe *sqe = &s->sqes[i << 1]; | |
932131c9 | 1104 | |
7d04588a AG |
1105 | memset(&sqe->cmd, 0, sizeof(struct nvme_uring_cmd)); |
1106 | } | |
1107 | } | |
256714ea JA |
1108 | return nr_batch; |
1109 | } | |
1110 | ||
1111 | #ifdef CONFIG_LIBAIO | |
1112 | static int prep_more_ios_aio(struct submitter *s, int max_ios, struct iocb *iocbs) | |
1113 | { | |
8310c570 | 1114 | uint64_t data; |
256714ea JA |
1115 | struct file *f; |
1116 | unsigned index; | |
256714ea JA |
1117 | |
1118 | index = 0; | |
1119 | while (index < max_ios) { | |
1120 | struct iocb *iocb = &iocbs[index]; | |
1121 | ||
1122 | if (s->nr_files == 1) { | |
1123 | f = &s->files[0]; | |
1124 | } else { | |
1125 | f = &s->files[s->cur_file]; | |
1126 | if (f->pending_ios >= file_depth(s)) { | |
1127 | s->cur_file++; | |
1128 | if (s->cur_file == s->nr_files) | |
1129 | s->cur_file = 0; | |
1130 | f = &s->files[s->cur_file]; | |
1131 | } | |
1132 | } | |
1133 | f->pending_ios++; | |
1134 | ||
256714ea | 1135 | io_prep_pread(iocb, f->real_fd, s->iovecs[index].iov_base, |
501565a1 | 1136 | s->iovecs[index].iov_len, get_offset(s, f)); |
256714ea JA |
1137 | |
1138 | data = f->fileno; | |
52479d8b | 1139 | if (stats && stats_running) |
8310c570 | 1140 | data |= (((uint64_t) s->clock_index) << 32); |
256714ea JA |
1141 | iocb->data = (void *) (uintptr_t) data; |
1142 | index++; | |
1143 | } | |
1144 | return index; | |
1145 | } | |
1146 | ||
1147 | static int reap_events_aio(struct submitter *s, struct io_event *events, int evs) | |
1148 | { | |
1149 | int last_idx = -1, stat_nr = 0; | |
1150 | int reaped = 0; | |
1151 | ||
1152 | while (evs) { | |
8310c570 | 1153 | uint64_t data = (uintptr_t) events[reaped].data; |
256714ea JA |
1154 | struct file *f = &s->files[data & 0xffffffff]; |
1155 | ||
1156 | f->pending_ios--; | |
1157 | if (events[reaped].res != bs) { | |
1158 | printf("io: unexpected ret=%ld\n", events[reaped].res); | |
1159 | return -1; | |
1160 | } | |
1161 | if (stats) { | |
1162 | int clock_index = data >> 32; | |
1163 | ||
1164 | if (last_idx != clock_index) { | |
1165 | if (last_idx != -1) { | |
1166 | add_stat(s, last_idx, stat_nr); | |
1167 | stat_nr = 0; | |
1168 | } | |
1169 | last_idx = clock_index; | |
d4af2ece JA |
1170 | } |
1171 | stat_nr++; | |
256714ea JA |
1172 | } |
1173 | reaped++; | |
1174 | evs--; | |
1175 | } | |
1176 | ||
1177 | if (stat_nr) | |
1178 | add_stat(s, last_idx, stat_nr); | |
1179 | ||
1180 | s->inflight -= reaped; | |
1181 | s->done += reaped; | |
1182 | return reaped; | |
1183 | } | |
1184 | ||
/*
 * Legacy libaio submitter thread: repeatedly prepare up to batch_submit
 * IOs, submit them with io_submit(), and reap completions with
 * io_getevents() whenever the queue is (or would become) full.
 * Runs until s->finish is set; sets the global 'finish' on exit.
 */
static void *submitter_aio_fn(void *data)
{
	struct submitter *s = data;
	int i, ret, prepped;
	struct iocb **iocbsptr;
	struct iocb *iocbs;
	struct io_event *events;
#ifdef ARCH_HAVE_CPU_CLOCK
	int nr_batch = submitter_init(s);
#else
	submitter_init(s);
#endif

	iocbsptr = calloc(depth, sizeof(struct iocb *));
	iocbs = calloc(depth, sizeof(struct iocb));
	events = calloc(depth, sizeof(struct io_event));

	for (i = 0; i < depth; i++)
		iocbsptr[i] = &iocbs[i];

	prepped = 0;
	do {
		int to_wait, to_submit, to_prep;

		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios_aio(s, to_prep, iocbs);
#ifdef ARCH_HAVE_CPU_CLOCK
			/* timestamp this batch for latency accounting */
			if (prepped && stats) {
				s->clock_batch[s->clock_index] = get_cpu_clock();
				s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
			}
#endif
		}
		s->inflight += prepped;
		to_submit = prepped;

		/* only wait for completions if the queue would overflow */
		if (to_submit && (s->inflight + to_submit <= depth))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, batch_complete);

		ret = io_submit(s->aio_ctx, to_submit, iocbsptr);
		s->calls++;
		if (ret < 0) {
			perror("io_submit");
			break;
		} else if (ret != to_submit) {
			printf("submitted %d, wanted %d\n", ret, to_submit);
			break;
		}
		prepped = 0;

		/* reap until we've collected the completions we waited for */
		while (to_wait) {
			int r;

			s->calls++;
			r = io_getevents(s->aio_ctx, to_wait, to_wait, events, NULL);
			if (r < 0) {
				perror("io_getevents");
				break;
			} else if (r != to_wait) {
				printf("r=%d, wait=%d\n", r, to_wait);
				break;
			}
			r = reap_events_aio(s, events, r);
			s->reaps += r;
			to_wait -= r;
		}
	} while (!s->finish);

	free(iocbsptr);
	free(iocbs);
	free(events);
	finish = 1;
	return NULL;
}
1262 | #endif | |
1263 | ||
ca8c91c5 JA |
1264 | static void io_uring_unregister_ring(struct submitter *s) |
1265 | { | |
1266 | struct io_uring_rsrc_update up = { | |
1267 | .offset = s->enter_ring_fd, | |
1268 | }; | |
1269 | ||
1270 | syscall(__NR_io_uring_register, s->ring_fd, IORING_UNREGISTER_RING_FDS, | |
1271 | &up, 1); | |
1272 | } | |
1273 | ||
1274 | static int io_uring_register_ring(struct submitter *s) | |
1275 | { | |
1276 | struct io_uring_rsrc_update up = { | |
1277 | .data = s->ring_fd, | |
1278 | .offset = -1U, | |
1279 | }; | |
1280 | int ret; | |
1281 | ||
1282 | ret = syscall(__NR_io_uring_register, s->ring_fd, | |
1283 | IORING_REGISTER_RING_FDS, &up, 1); | |
1284 | if (ret == 1) { | |
1285 | s->enter_ring_fd = up.offset; | |
1286 | return 0; | |
1287 | } | |
1288 | register_ring = 0; | |
1289 | return -1; | |
1290 | } | |
1291 | ||
256714ea JA |
/*
 * io_uring submitter thread: prepare a batch of SQEs, submit/wait with
 * io_uring_enter() (or rely on the SQPOLL kernel thread), and reap CQEs.
 * Handles partial submissions and EAGAIN by re-entering at the goto
 * labels below. Runs until s->finish is set; sets the global 'finish'
 * on exit.
 */
static void *submitter_uring_fn(void *data)
{
	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;
	int ret, prepped;
#ifdef ARCH_HAVE_CPU_CLOCK
	int nr_batch = submitter_init(s);
#else
	submitter_init(s);
#endif

	if (register_ring)
		io_uring_register_ring(s);

	prepped = 0;
	do {
		int to_wait, to_submit, this_reap, to_prep;
		unsigned ring_flags = 0;

		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios_uring(s, to_prep);
#ifdef ARCH_HAVE_CPU_CLOCK
			/* timestamp this batch for latency accounting */
			if (prepped && stats) {
				s->clock_batch[s->clock_index] = get_cpu_clock();
				s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
			}
#endif
		}
		s->inflight += prepped;
submit_more:
		to_submit = prepped;
submit:
		/* only wait for completions if the queue would overflow */
		if (to_submit && (s->inflight + to_submit <= depth))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, batch_complete);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		if (sq_thread_poll)
			ring_flags = atomic_load_acquire(ring->flags);
		if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
			unsigned flags = 0;

			if (to_wait)
				flags = IORING_ENTER_GETEVENTS;
			if (ring_flags & IORING_SQ_NEED_WAKEUP)
				flags |= IORING_ENTER_SQ_WAKEUP;
			ret = io_uring_enter(s, to_submit, to_wait, flags);
			s->calls++;
		} else {
			/* for SQPOLL, we submitted it all effectively */
			ret = to_submit;
		}

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */
		this_reap = 0;
		do {
			int r;

			if (pt)
				r = reap_events_uring_pt(s);
			else
				r = reap_events_uring(s);
			if (r == -1) {
				s->finish = 1;
				break;
			} else if (r > 0)
				this_reap += r;
		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

		if (ret >= 0) {
			if (!ret) {
				/* nothing submitted; wait out inflight IOs */
				to_submit = 0;
				if (s->inflight)
					goto submit;
				continue;
			} else if (ret < to_submit) {
				/* partial submit: retry the remainder */
				int diff = to_submit - ret;

				s->done += ret;
				prepped -= diff;
				goto submit_more;
			}
			s->done += ret;
			prepped = 0;
			continue;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
				if (s->finish)
					break;
				if (this_reap)
					goto submit;
				to_submit = 0;
				goto submit;
			}
			printf("io_submit: %s\n", strerror(errno));
			break;
		}
	} while (!s->finish);

	if (register_ring)
		io_uring_unregister_ring(s);

	finish = 1;
	return NULL;
}
1407 | ||
a7648136 | 1408 | #ifdef CONFIG_PWRITEV2 |
379406bc JA |
1409 | static void *submitter_sync_fn(void *data) |
1410 | { | |
1411 | struct submitter *s = data; | |
1412 | int ret; | |
1413 | ||
1414 | submitter_init(s); | |
1415 | ||
1416 | do { | |
1417 | uint64_t offset; | |
1418 | struct file *f; | |
379406bc JA |
1419 | |
1420 | if (s->nr_files == 1) { | |
1421 | f = &s->files[0]; | |
1422 | } else { | |
1423 | f = &s->files[s->cur_file]; | |
1424 | if (f->pending_ios >= file_depth(s)) { | |
1425 | s->cur_file++; | |
1426 | if (s->cur_file == s->nr_files) | |
1427 | s->cur_file = 0; | |
1428 | f = &s->files[s->cur_file]; | |
1429 | } | |
1430 | } | |
1431 | f->pending_ios++; | |
1432 | ||
379406bc JA |
1433 | #ifdef ARCH_HAVE_CPU_CLOCK |
1434 | if (stats) | |
1435 | s->clock_batch[s->clock_index] = get_cpu_clock(); | |
1436 | #endif | |
1437 | ||
1438 | s->inflight++; | |
1439 | s->calls++; | |
1440 | ||
501565a1 | 1441 | offset = get_offset(s, f); |
379406bc JA |
1442 | if (polled) |
1443 | ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, RWF_HIPRI); | |
1444 | else | |
1445 | ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, 0); | |
1446 | ||
1447 | if (ret < 0) { | |
1448 | perror("preadv2"); | |
1449 | break; | |
1450 | } else if (ret != bs) { | |
1451 | break; | |
1452 | } | |
1453 | ||
1454 | s->done++; | |
1455 | s->inflight--; | |
1456 | f->pending_ios--; | |
1457 | if (stats) | |
1458 | add_stat(s, s->clock_index, 1); | |
1459 | } while (!s->finish); | |
1460 | ||
1461 | finish = 1; | |
1462 | return NULL; | |
1463 | } | |
a7648136 JA |
1464 | #else |
1465 | static void *submitter_sync_fn(void *data) | |
1466 | { | |
1467 | finish = 1; | |
1468 | return NULL; | |
1469 | } | |
1470 | #endif | |
379406bc | 1471 | |
54319661 JA |
1472 | static struct submitter *get_submitter(int offset) |
1473 | { | |
1474 | void *ret; | |
1475 | ||
1476 | ret = submitter; | |
1477 | if (offset) | |
1478 | ret += offset * (sizeof(*submitter) + depth * sizeof(struct iovec)); | |
1479 | return ret; | |
1480 | } | |
1481 | ||
65e1a5e8 | 1482 | static void do_finish(const char *reason) |
c9fb4c5b | 1483 | { |
54319661 | 1484 | int j; |
4b9e13dc | 1485 | |
65e1a5e8 | 1486 | printf("Exiting on %s\n", reason); |
54319661 JA |
1487 | for (j = 0; j < nthreads; j++) { |
1488 | struct submitter *s = get_submitter(j); | |
1489 | s->finish = 1; | |
1490 | } | |
4b9e13dc JA |
1491 | if (max_iops > 1000000) { |
1492 | double miops = (double) max_iops / 1000000.0; | |
1493 | printf("Maximum IOPS=%.2fM\n", miops); | |
1494 | } else if (max_iops > 100000) { | |
1495 | double kiops = (double) max_iops / 1000.0; | |
1496 | printf("Maximum IOPS=%.2fK\n", kiops); | |
1497 | } else { | |
18b557a0 | 1498 | printf("Maximum IOPS=%lu\n", max_iops); |
4b9e13dc | 1499 | } |
c9fb4c5b JA |
1500 | finish = 1; |
1501 | } | |
1502 | ||
65e1a5e8 EV |
/* Signal handler: translate SIGINT/SIGBREAK into an orderly shutdown. */
static void sig_int(int sig)
{
	(void) sig;
	do_finish("signal");
}
1507 | ||
c9fb4c5b JA |
1508 | static void arm_sig_int(void) |
1509 | { | |
1510 | struct sigaction act; | |
1511 | ||
1512 | memset(&act, 0, sizeof(act)); | |
1513 | act.sa_handler = sig_int; | |
1514 | act.sa_flags = SA_RESTART; | |
1515 | sigaction(SIGINT, &act, NULL); | |
2cf71009 BP |
1516 | |
1517 | /* Windows uses SIGBREAK as a quit signal from other applications */ | |
1518 | #ifdef WIN32 | |
1519 | sigaction(SIGBREAK, &act, NULL); | |
1520 | #endif | |
c9fb4c5b JA |
1521 | } |
1522 | ||
/*
 * Print the option summary (with current defaults substituted into the
 * format string) and exit with @status. Note the argument list must
 * stay in exact 1:1 order with the %-conversions above it.
 */
static void usage(char *argv, int status)
{
	char runtime_str[16];
	snprintf(runtime_str, sizeof(runtime_str), "%d", runtime);
	printf("%s [options] -- [filenames]\n"
		" -d <int> : IO Depth, default %d\n"
		" -s <int> : Batch submit, default %d\n"
		" -c <int> : Batch complete, default %d\n"
		" -b <int> : Block size, default %d\n"
		" -p <bool> : Polled IO, default %d\n"
		" -B <bool> : Fixed buffers, default %d\n"
		" -D <bool> : DMA map fixed buffers, default %d\n"
		" -F <bool> : Register files, default %d\n"
		" -n <int> : Number of threads, default %d\n"
		" -O <bool> : Use O_DIRECT, default %d\n"
		" -N <bool> : Perform just no-op requests, default %d\n"
		" -t <bool> : Track IO latencies, default %d\n"
		" -T <int> : TSC rate in HZ\n"
		" -r <int> : Runtime in seconds, default %s\n"
		" -R <bool> : Use random IO, default %d\n"
		" -a <bool> : Use legacy aio, default %d\n"
		" -S <bool> : Use sync IO (preadv2), default %d\n"
		" -X <bool> : Use registered ring %d\n"
		" -P <bool> : Automatically place on device home node %d\n"
		" -u <bool> : Use nvme-passthrough I/O, default %d\n",
		argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
		fixedbufs, dma_map, register_files, nthreads, !buffered, do_nop,
		stats, runtime == 0 ? "unlimited" : runtime_str, random_io, aio,
		use_sync, register_ring, numa_placement, pt);
	exit(status);
}
1554 | ||
203e4c26 JA |
1555 | static void read_tsc_rate(void) |
1556 | { | |
1557 | char buffer[32]; | |
1558 | int fd, ret; | |
1559 | ||
1560 | if (tsc_rate) | |
1561 | return; | |
1562 | ||
1563 | fd = open(TSC_RATE_FILE, O_RDONLY); | |
1564 | if (fd < 0) | |
1565 | return; | |
1566 | ||
1567 | ret = read(fd, buffer, sizeof(buffer)); | |
1568 | if (ret < 0) { | |
1569 | close(fd); | |
1570 | return; | |
1571 | } | |
1572 | ||
1573 | tsc_rate = strtoul(buffer, NULL, 10); | |
1574 | printf("Using TSC rate %luHz\n", tsc_rate); | |
1575 | close(fd); | |
1576 | } | |
1577 | ||
1578 | static void write_tsc_rate(void) | |
1579 | { | |
1580 | char buffer[32]; | |
1581 | struct stat sb; | |
1582 | int fd, ret; | |
1583 | ||
1584 | if (!stat(TSC_RATE_FILE, &sb)) | |
1585 | return; | |
1586 | ||
1587 | fd = open(TSC_RATE_FILE, O_WRONLY | O_CREAT, 0644); | |
1588 | if (fd < 0) | |
1589 | return; | |
1590 | ||
1591 | memset(buffer, 0, sizeof(buffer)); | |
1592 | sprintf(buffer, "%lu", tsc_rate); | |
1593 | ret = write(fd, buffer, strlen(buffer)); | |
1594 | if (ret < 0) | |
1595 | perror("write"); | |
1596 | close(fd); | |
1597 | } | |
1598 | ||
c9fb4c5b JA |
/*
 * Entry point: parse options, distribute the given files across the
 * submitter threads, start one thread per submitter (sync, io_uring or
 * libaio engine), then print per-second IOPS/BW until the runtime
 * expires or a signal arrives, and finally join the threads and dump
 * latency percentiles if stats were enabled.
 */
int main(int argc, char *argv[])
{
	struct submitter *s;
	unsigned long done, calls, reap;
	int i, j, flags, fd, opt, threads_per_f, threads_rem = 0, nfiles;
	struct file f;
	void *ret;

	if (!do_nop && argc < 2)
		usage(argv[0], 1);

	while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:t:T:a:r:D:R:X:S:P:u:h?")) != -1) {
		switch (opt) {
		case 'a':
			aio = !!atoi(optarg);
			break;
		case 'd':
			depth = atoi(optarg);
			break;
		case 's':
			batch_submit = atoi(optarg);
			if (!batch_submit)
				batch_submit = 1;
			break;
		case 'c':
			batch_complete = atoi(optarg);
			if (!batch_complete)
				batch_complete = 1;
			break;
		case 'b':
			bs = atoi(optarg);
			break;
		case 'p':
			polled = !!atoi(optarg);
			break;
		case 'B':
			fixedbufs = !!atoi(optarg);
			break;
		case 'F':
			register_files = !!atoi(optarg);
			break;
		case 'n':
			nthreads = atoi(optarg);
			if (!nthreads) {
				printf("Threads must be non-zero\n");
				usage(argv[0], 1);
			}
			break;
		case 'N':
			do_nop = !!atoi(optarg);
			break;
		case 'O':
			buffered = !atoi(optarg);
			break;
		case 't':
#ifndef ARCH_HAVE_CPU_CLOCK
			fprintf(stderr, "Stats not supported on this CPU\n");
			return 1;
#endif
			stats = !!atoi(optarg);
			break;
		case 'T':
#ifndef ARCH_HAVE_CPU_CLOCK
			fprintf(stderr, "Stats not supported on this CPU\n");
			return 1;
#endif
			tsc_rate = strtoul(optarg, NULL, 10);
			write_tsc_rate();
			break;
		case 'r':
			runtime = atoi(optarg);
			break;
		case 'D':
			dma_map = !!atoi(optarg);
			break;
		case 'R':
			random_io = !!atoi(optarg);
			break;
		case 'X':
			register_ring = !!atoi(optarg);
			break;
		case 'S':
#ifdef CONFIG_PWRITEV2
			use_sync = !!atoi(optarg);
#else
			fprintf(stderr, "preadv2 not supported\n");
			exit(1);
#endif
			break;
		case 'P':
			numa_placement = !!atoi(optarg);
			break;
		case 'u':
			pt = !!atoi(optarg);
			break;
		case 'h':
		case '?':
		default:
			usage(argv[0], 0);
			break;
		}
	}

	if (stats)
		read_tsc_rate();

	/* sanitize option combinations */
	if (batch_complete > depth)
		batch_complete = depth;
	if (batch_submit > depth)
		batch_submit = depth;
	if (!fixedbufs && dma_map)
		dma_map = 0;

	/*
	 * One flat allocation: each submitter slot is the struct followed
	 * by its iovec array; get_submitter() relies on this layout.
	 */
	submitter = calloc(nthreads, sizeof(*submitter) +
				roundup_pow2(depth) * sizeof(struct iovec));
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		s->numa_node = -1;
		s->index = j;
		s->done = s->calls = s->reaps = 0;
	}

	flags = O_RDONLY | O_NOATIME;
	if (!buffered)
		flags |= O_DIRECT;

	/* spread the files over the threads as evenly as possible */
	j = 0;
	i = optind;
	nfiles = argc - i;
	if (!do_nop) {
		if (!nfiles) {
			printf("No files specified\n");
			usage(argv[0], 1);
		}
		threads_per_f = nthreads / nfiles;
		/* make sure each thread gets assigned files */
		if (threads_per_f == 0) {
			threads_per_f = 1;
		} else {
			threads_rem = nthreads - threads_per_f * nfiles;
		}
	}
	while (!do_nop && i < argc) {
		int k, limit;

		memset(&f, 0, sizeof(f));

		fd = open(argv[i], flags);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		f.real_fd = fd;
		if (get_file_size(&f)) {
			printf("failed getting size of device/file\n");
			return 1;
		}
		if (f.max_blocks <= 1) {
			printf("Zero file/device size?\n");
			return 1;
		}
		f.max_blocks--;

		limit = threads_per_f;
		limit += threads_rem > 0 ? 1 : 0;
		for (k = 0; k < limit; k++) {
			s = get_submitter((j + k) % nthreads);

			if (s->nr_files == MAX_FDS) {
				printf("Max number of files (%d) reached\n", MAX_FDS);
				break;
			}

			memcpy(&s->files[s->nr_files], &f, sizeof(f));

			if (numa_placement)
				detect_node(s, argv[i]);

			s->filename = argv[i];
			s->nr_files++;
		}
		threads_rem--;
		i++;
		j += limit;
	}

	arm_sig_int();

	t_io_uring_page_size = sysconf(_SC_PAGESIZE);
	if (t_io_uring_page_size < 0)
		t_io_uring_page_size = 4096;

	/* launch one submitter thread per slot, engine chosen by options */
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		if (use_sync)
			pthread_create(&s->thread, NULL, submitter_sync_fn, s);
		else if (!aio)
			pthread_create(&s->thread, NULL, submitter_uring_fn, s);
#ifdef CONFIG_LIBAIO
		else
			pthread_create(&s->thread, NULL, submitter_aio_fn, s);
#endif
	}

	/* once-a-second progress loop: aggregate and print deltas */
	reap = calls = done = 0;
	do {
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long rpc = 0, ipc = 0;
		unsigned long iops, bw;

		sleep(1);
		if (runtime && !--runtime)
			do_finish("timeout");

		/* don't print partial run, if interrupted by signal */
		if (finish)
			break;

		/* one second in to the run, enable stats */
		if (stats)
			stats_running = 1;

		for (j = 0; j < nthreads; j++) {
			s = get_submitter(j);
			this_done += s->done;
			this_call += s->calls;
			this_reap += s->reaps;
		}
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);
		} else
			rpc = ipc = -1;
		iops = this_done - done;
		if (bs > 1048576)
			bw = iops * (bs / 1048576);
		else
			bw = iops / (1048576 / bs);
		if (iops > 1000000) {
			double miops = (double) iops / 1000000.0;
			printf("IOPS=%.2fM, ", miops);
		} else if (iops > 100000) {
			double kiops = (double) iops / 1000.0;
			printf("IOPS=%.2fK, ", kiops);
		} else {
			printf("IOPS=%lu, ", iops);
		}
		max_iops = max(max_iops, iops);
		if (!do_nop) {
			if (bw > 2000) {
				double bw_g = (double) bw / 1000.0;

				printf("BW=%.2fGiB/s, ", bw_g);
			} else {
				printf("BW=%luMiB/s, ", bw);
			}
		}
		printf("IOS/call=%ld/%ld\n", rpc, ipc);
		done = this_done;
		calls = this_call;
		reap = this_reap;
	} while (!finish);

	/*
	 * Join the workers and dump per-thread latency percentiles.
	 * NOTE(review): ring_fd is closed for every engine, including the
	 * aio/sync paths where it was never opened — presumably harmless
	 * (closes fd 0 or fails with EBADF); confirm intent.
	 */
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_join(s->thread, &ret);
		close(s->ring_fd);

		if (stats) {
			unsigned long nr;

			printf("%d: Latency percentiles:\n", s->tid);
			for (i = 0, nr = 0; i < PLAT_NR; i++)
				nr += s->plat[i];
			show_clat_percentiles(s->plat, nr, 4);
			free(s->clock_batch);
			free(s->plat);
		}
	}

	free(submitter);
	return 0;
}