Commit | Line | Data |
---|---|---|
c9fb4c5b JA |
1 | #include <stdio.h> |
2 | #include <errno.h> | |
3 | #include <assert.h> | |
4 | #include <stdlib.h> | |
5 | #include <stddef.h> | |
6 | #include <signal.h> | |
7 | #include <inttypes.h> | |
932131c9 | 8 | #include <math.h> |
c9fb4c5b | 9 | |
256714ea JA |
10 | #ifdef CONFIG_LIBAIO |
11 | #include <libaio.h> | |
12 | #endif | |
13 | ||
4b9e13dc JA |
14 | #ifdef CONFIG_LIBNUMA |
15 | #include <numa.h> | |
16 | #endif | |
17 | ||
c9fb4c5b JA |
18 | #include <sys/types.h> |
19 | #include <sys/stat.h> | |
20 | #include <sys/ioctl.h> | |
21 | #include <sys/syscall.h> | |
22 | #include <sys/resource.h> | |
c3e2fc25 | 23 | #include <sys/mman.h> |
e31b8288 | 24 | #include <sys/uio.h> |
c9fb4c5b JA |
25 | #include <linux/fs.h> |
26 | #include <fcntl.h> | |
27 | #include <unistd.h> | |
c9fb4c5b JA |
28 | #include <string.h> |
29 | #include <pthread.h> | |
30 | #include <sched.h> | |
31 | ||
74efb029 | 32 | #include "../arch/arch.h" |
a5a2429e | 33 | #include "../os/os.h" |
57fa61f0 | 34 | #include "../lib/types.h" |
932131c9 | 35 | #include "../lib/roundup.h" |
9eff5320 | 36 | #include "../lib/rand.h" |
932131c9 | 37 | #include "../minmax.h" |
f3e769a4 | 38 | #include "../os/linux/io_uring.h" |
7d04588a | 39 | #include "../engines/nvme.h" |
ac122fea | 40 | |
/*
 * Userspace view of the submission queue ring. All members are pointers
 * into the kernel-shared mmap'ed SQ ring region (offsets taken from
 * io_uring_params.sq_off in setup_ring()).
 */
struct io_sq_ring {
	unsigned *head;		/* consumed by the kernel */
	unsigned *tail;		/* produced by us, published with release */
	unsigned *ring_mask;	/* size - 1, for index wrapping */
	unsigned *ring_entries;	/* ring size (power of 2) */
	unsigned *flags;	/* kernel flags, e.g. SQPOLL wakeup */
	unsigned *array;	/* indirection: slot -> sqe index */
};
49 | ||
/*
 * Userspace view of the completion queue ring, mmap'ed from the kernel
 * (offsets taken from io_uring_params.cq_off in setup_ring()).
 */
struct io_cq_ring {
	unsigned *head;		/* consumed by us, published with release */
	unsigned *tail;		/* produced by the kernel */
	unsigned *ring_mask;	/* size - 1, for index wrapping */
	unsigned *ring_entries;	/* ring size (power of 2) */
	struct io_uring_cqe *cqes;	/* completion entries */
};
57 | ||
701d1277 | 58 | #define DEPTH 128 |
2e7888ef JA |
59 | #define BATCH_SUBMIT 32 |
60 | #define BATCH_COMPLETE 32 | |
c9fb4c5b JA |
61 | #define BS 4096 |
62 | ||
a7086591 JA |
63 | #define MAX_FDS 16 |
64 | ||
c3e2fc25 | 65 | static unsigned sq_ring_mask, cq_ring_mask; |
e39c34dc | 66 | |
a7086591 JA |
/*
 * Per-target-file state for a submitter thread.
 */
struct file {
	unsigned long max_blocks;	/* size in 'bs' blocks (LBAs for passthrough) */
	unsigned long max_size;		/* size in bytes; NOTE(review): set to LBA count for passthrough — confirm units at callers */
	unsigned long cur_off;		/* next sequential byte offset */
	unsigned pending_ios;		/* IOs submitted but not yet reaped */
	unsigned int nsid; /* nsid field required for nvme-passthrough */
	unsigned int lba_shift; /* lba_shift field required for nvme-passthrough */
	int real_fd;			/* fd returned by open(2) */
	int fixed_fd;			/* index into the registered-files table */
	int fileno;			/* index of this file within the submitter */
};
78 | ||
932131c9 JA |
79 | #define PLAT_BITS 6 |
80 | #define PLAT_VAL (1 << PLAT_BITS) | |
81 | #define PLAT_GROUP_NR 29 | |
82 | #define PLAT_NR (PLAT_GROUP_NR * PLAT_VAL) | |
83 | ||
c9fb4c5b JA |
/*
 * Per-thread state: one ring (or aio context), its mmap'ed SQ/CQ views,
 * the files it drives, RNG state and latency-stats bookkeeping. The
 * flexible iovecs[] array at the end holds one preallocated buffer
 * descriptor per queue slot.
 */
struct submitter {
	pthread_t thread;
	int ring_fd;		/* fd from io_uring_setup() */
	int enter_ring_fd;	/* fd (or registered index) used for enter */
	int index;		/* this submitter's slot number */
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;	/* mmap'ed SQE array */
	struct io_cq_ring cq_ring;
	int inflight;		/* submitted minus reaped */
	int tid;
	unsigned long reaps;
	unsigned long done;
	unsigned long calls;
	volatile int finish;	/* set asynchronously to request shutdown */

	__s32 *fds;		/* table passed to IORING_REGISTER_FILES */

	struct taus258_state rand_state;	/* per-thread offset RNG */

	unsigned long *clock_batch;	/* submit-time TSC samples, by index */
	int clock_index;		/* next slot in clock_batch */
	unsigned long *plat;		/* latency histogram (PLAT_NR buckets) */

#ifdef CONFIG_LIBAIO
	io_context_t aio_ctx;
#endif

	int numa_node;		/* node of the device, or -1 */
	const char *filename;

	struct file files[MAX_FDS];
	unsigned nr_files;
	unsigned cur_file;	/* round-robin cursor for init_new_io() */
	struct iovec iovecs[];	/* one per queue slot, allocated with the struct */
};
119 | ||
e39863e3 | 120 | static struct submitter *submitter; |
c9fb4c5b | 121 | static volatile int finish; |
52479d8b | 122 | static int stats_running; |
16d25711 | 123 | static unsigned long max_iops; |
c409e4c2 | 124 | static long t_io_uring_page_size; |
c9fb4c5b | 125 | |
e39863e3 KB |
126 | static int depth = DEPTH; |
127 | static int batch_submit = BATCH_SUBMIT; | |
128 | static int batch_complete = BATCH_COMPLETE; | |
5bd526f2 | 129 | static int bs = BS; |
f0403f94 | 130 | static int polled = 1; /* use IO polling */ |
701d1277 | 131 | static int fixedbufs = 1; /* use fixed user buffers */ |
8c5fa755 | 132 | static int register_files = 1; /* use fixed files */ |
f0403f94 | 133 | static int buffered = 0; /* use buffered IO, not O_DIRECT */ |
3d7d00a3 JA |
134 | static int sq_thread_poll = 0; /* use kernel submission/poller thread */ |
135 | static int sq_thread_cpu = -1; /* pin above thread to this CPU */ | |
8025517d | 136 | static int do_nop = 0; /* no-op SQ ring commands */ |
54319661 | 137 | static int nthreads = 1; |
932131c9 | 138 | static int stats = 0; /* generate IO stats */ |
256714ea | 139 | static int aio = 0; /* use libaio */ |
beda9d8d JA |
140 | static int runtime = 0; /* runtime */ |
141 | static int random_io = 1; /* random or sequential IO */ | |
ca8c91c5 | 142 | static int register_ring = 1; /* register ring */ |
379406bc | 143 | static int use_sync = 0; /* use preadv2 */ |
4b9e13dc | 144 | static int numa_placement = 0; /* set to node of device */ |
7d04588a | 145 | static int pt = 0; /* passthrough I/O or not */ |
256714ea | 146 | |
932131c9 | 147 | static unsigned long tsc_rate; |
c9fb4c5b | 148 | |
203e4c26 JA |
149 | #define TSC_RATE_FILE "tsc-rate" |
150 | ||
84106576 | 151 | static int vectored = 1; |
b3915995 | 152 | |
932131c9 | 153 | static float plist[] = { 1.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, |
ad45a465 | 154 | 80.0, 90.0, 95.0, 99.0, 99.5, 99.9, 99.95, 99.99 }; |
932131c9 JA |
155 | static int plist_len = 17; |
156 | ||
7d04588a AG |
/*
 * Issue an NVMe Identify admin command through the passthrough ioctl.
 * @fd:   open fd on the NVMe character device
 * @nsid: namespace to identify
 * @cns:  identify CNS value (which data structure to fetch)
 * @csi:  command set identifier
 * @data: output buffer, must hold NVME_IDENTIFY_DATA_SIZE bytes
 *
 * Returns the ioctl() result: 0 on success, -1 with errno on syscall
 * failure, or a positive NVMe status code.
 */
static int nvme_identify(int fd, __u32 nsid, enum nvme_identify_cns cns,
			 enum nvme_csi csi, void *data)
{
	struct nvme_passthru_cmd cmd = {
		.opcode = nvme_admin_identify,
		.nsid = nsid,
		.addr = (__u64)(uintptr_t)data,
		.data_len = NVME_IDENTIFY_DATA_SIZE,
		.cdw10 = cns,
		.cdw11 = csi << NVME_IDENTIFY_CSI_SHIFT,
		.timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT,
	};

	return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
}
172 | ||
/*
 * Query an NVMe char device for its namespace-id, LBA data size (bytes)
 * and capacity in LBAs. On success fills *nsid, *lba_sz and *nlba and
 * returns 0. On failure returns -errno (or the identify status) and
 * closes 'fd' — the caller must not reuse the fd after an error.
 */
static int nvme_get_info(int fd, __u32 *nsid, __u32 *lba_sz, __u64 *nlba)
{
	struct nvme_id_ns ns;
	int namespace_id;
	int err;

	namespace_id = ioctl(fd, NVME_IOCTL_ID);
	if (namespace_id < 0) {
		fprintf(stderr, "error failed to fetch namespace-id\n");
		close(fd);
		return -errno;
	}

	/*
	 * Identify namespace to get namespace-id, namespace size in LBA's
	 * and LBA data size.
	 */
	err = nvme_identify(fd, namespace_id, NVME_IDENTIFY_CNS_NS,
				NVME_CSI_NVM, &ns);
	if (err) {
		fprintf(stderr, "error failed to fetch identify namespace\n");
		close(fd);
		return err;
	}

	*nsid = namespace_id;
	/* LBA data size is 2^ds bytes for the currently formatted LBA format */
	*lba_sz = 1 << ns.lbaf[(ns.flbas & 0x0f)].ds;
	*nlba = ns.nsze;

	return 0;
}
204 | ||
932131c9 JA |
205 | static unsigned long cycles_to_nsec(unsigned long cycles) |
206 | { | |
207 | uint64_t val; | |
208 | ||
209 | if (!tsc_rate) | |
210 | return cycles; | |
211 | ||
212 | val = cycles * 1000000000ULL; | |
213 | return val / tsc_rate; | |
214 | } | |
215 | ||
/*
 * Map a latency histogram bucket index back to a representative latency
 * value — in nsec when a TSC rate is known, otherwise raw cycles (see
 * cycles_to_nsec()). Inverse of plat_val_to_idx(): small indices map
 * exactly, wider buckets return the midpoint of their value range.
 */
static unsigned long plat_idx_to_val(unsigned int idx)
{
	unsigned int error_bits;
	unsigned long k, base;

	assert(idx < PLAT_NR);

	/* MSB <= (PLAT_BITS-1), cannot be rounded off. Use
	 * all bits of the sample as index */
	if (idx < (PLAT_VAL << 1))
		return cycles_to_nsec(idx);

	/* Find the group and compute the minimum value of that group */
	error_bits = (idx >> PLAT_BITS) - 1;
	base = ((unsigned long) 1) << (error_bits + PLAT_BITS);

	/* Find its bucket number of the group */
	k = idx % PLAT_VAL;

	/* Return the mean of the range of the bucket */
	return cycles_to_nsec(base + ((k + 0.5) * (1 << error_bits)));
}
238 | ||
c409e4c2 AG |
/*
 * Walk the latency histogram 'io_u_plat' (PLAT_NR buckets, 'nr' total
 * samples) and compute the latency value for each percentile in the
 * global plist[]. On success *output points to a malloc'ed array of
 * plist_len values (caller frees), *minv/*maxv hold the extremes seen,
 * and the return value is plist_len. Returns 0 on allocation failure.
 */
unsigned int calculate_clat_percentiles(unsigned long *io_u_plat,
		unsigned long nr, unsigned long **output,
		unsigned long *maxv, unsigned long *minv)
{
	unsigned long sum = 0;
	unsigned int len = plist_len, i, j = 0;
	unsigned long *ovals = NULL;
	bool is_last;

	*minv = -1UL;
	*maxv = 0;

	ovals = malloc(len * sizeof(*ovals));
	if (!ovals)
		return 0;

	/*
	 * Calculate bucket values, note down max and min values
	 */
	is_last = false;
	for (i = 0; i < PLAT_NR && !is_last; i++) {
		sum += io_u_plat[i];
		/* every percentile whose cutoff this bucket crosses resolves here */
		while (sum >= ((long double) plist[j] / 100.0 * nr)) {
			assert(plist[j] <= 100.0);

			ovals[j] = plat_idx_to_val(i);
			if (ovals[j] < *minv)
				*minv = ovals[j];
			if (ovals[j] > *maxv)
				*maxv = ovals[j];

			is_last = (j == len - 1) != 0;
			if (is_last)
				break;

			j++;
		}
	}

	/* only possible if the histogram doesn't account for all 'nr' samples */
	if (!is_last)
		fprintf(stderr, "error calculating latency percentiles\n");

	*output = ovals;
	return len;
}
284 | ||
/*
 * Pretty-print the percentile table for the histogram 'io_u_plat' with
 * 'nr' samples, using 'precision' decimal places for the percentile
 * labels. Values are shown in tsc ticks (no rate known), nsec, or
 * scaled down to usec when they are uniformly large.
 */
static void show_clat_percentiles(unsigned long *io_u_plat, unsigned long nr,
				  unsigned int precision)
{
	unsigned int divisor, len, i, j = 0;
	unsigned long minv, maxv;
	unsigned long *ovals;
	int per_line, scale_down, time_width;
	bool is_last;
	char fmt[32];

	len = calculate_clat_percentiles(io_u_plat, nr, &ovals, &maxv, &minv);
	if (!len || !ovals)
		goto out;

	/* pick display unit based on the value range */
	if (!tsc_rate) {
		scale_down = 0;
		divisor = 1;
		printf("    percentiles (tsc ticks):\n    |");
	} else if (minv > 2000 && maxv > 99999) {
		scale_down = 1;
		divisor = 1000;
		printf("    percentiles (usec):\n    |");
	} else {
		scale_down = 0;
		divisor = 1;
		printf("    percentiles (nsec):\n    |");
	}

	/* column width sized for the largest (scaled) value, minimum 5 */
	time_width = max(5, (int) (log10(maxv / divisor) + 1));
	snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
			precision, time_width);
	/* fmt will be something like " %5.2fth=[%4llu]%c" */
	per_line = (80 - 7) / (precision + 10 + time_width);

	for (j = 0; j < len; j++) {
		/* for formatting */
		if (j != 0 && (j % per_line) == 0)
			printf("    |");

		/* end of the list */
		is_last = (j == len - 1) != 0;

		/* scale_down is 0 or 1, so this divides at most once */
		for (i = 0; i < scale_down; i++)
			ovals[j] = (ovals[j] + 999) / 1000;

		printf(fmt, plist[j], ovals[j], is_last ? '\n' : ',');

		if (is_last)
			break;

		if ((j % per_line) == per_line - 1)	/* for formatting */
			printf("\n");
	}

out:
	free(ovals);
}
342 | ||
#ifdef ARCH_HAVE_CPU_CLOCK
/*
 * Map a latency sample (CPU cycles) to its histogram bucket index in
 * [0, PLAT_NR). Small values map directly; larger values fall into
 * logarithmically wider groups of PLAT_VAL buckets each, discarding
 * low-order "error" bits. Inverse of plat_idx_to_val().
 */
static unsigned int plat_val_to_idx(unsigned long val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Find MSB starting from bit 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val)*8) - __builtin_clzll(val) - 1;

	/*
	 * MSB <= (PLAT_BITS-1), cannot be rounded off. Use
	 * all bits of the sample as index
	 */
	if (msb <= PLAT_BITS)
		return val;

	/* Compute the number of error bits to discard*/
	error_bits = msb - PLAT_BITS;

	/* Compute the number of buckets before the group */
	base = (error_bits + 1) << PLAT_BITS;

	/*
	 * Discard the error bits and apply the mask to find the
	 * index for the buckets in the group
	 */
	offset = (PLAT_VAL - 1) & (val >> error_bits);

	/* Make sure the index does not exceed (array size - 1) */
	idx = (base + offset) < (PLAT_NR - 1) ?
		(base + offset) : (PLAT_NR - 1);

	return idx;
}
#endif
932131c9 JA |
380 | |
/*
 * Account 'nr' completions whose submission timestamp was recorded in
 * s->clock_batch[clock_index]: compute the elapsed CPU cycles and add
 * the count to the matching latency histogram bucket. Compiled out when
 * the arch has no CPU clock. Skipped during shutdown and for
 * clock_index 0 (index 0 is treated as "no timestamp recorded").
 */
static void add_stat(struct submitter *s, int clock_index, int nr)
{
#ifdef ARCH_HAVE_CPU_CLOCK
	unsigned long cycles;
	unsigned int pidx;

	if (!s->finish && clock_index) {
		cycles = get_cpu_clock();
		cycles -= s->clock_batch[clock_index];
		pidx = plat_val_to_idx(cycles);
		s->plat[pidx] += nr;
	}
#endif
}
395 | ||
2ea53ca3 | 396 | static int io_uring_register_buffers(struct submitter *s) |
c9fb4c5b | 397 | { |
8025517d JA |
398 | if (do_nop) |
399 | return 0; | |
400 | ||
bfed648c | 401 | return syscall(__NR_io_uring_register, s->ring_fd, |
55845033 | 402 | IORING_REGISTER_BUFFERS, s->iovecs, roundup_pow2(depth)); |
2ea53ca3 JA |
403 | } |
404 | ||
a7abc9fb JA |
405 | static int io_uring_register_files(struct submitter *s) |
406 | { | |
48e698fa | 407 | int i; |
a7abc9fb | 408 | |
8025517d JA |
409 | if (do_nop) |
410 | return 0; | |
411 | ||
48e698fa JA |
412 | s->fds = calloc(s->nr_files, sizeof(__s32)); |
413 | for (i = 0; i < s->nr_files; i++) { | |
414 | s->fds[i] = s->files[i].real_fd; | |
415 | s->files[i].fixed_fd = i; | |
416 | } | |
a7abc9fb | 417 | |
bfed648c | 418 | return syscall(__NR_io_uring_register, s->ring_fd, |
919850d2 | 419 | IORING_REGISTER_FILES, s->fds, s->nr_files); |
a7abc9fb JA |
420 | } |
421 | ||
2ea53ca3 JA |
422 | static int io_uring_setup(unsigned entries, struct io_uring_params *p) |
423 | { | |
2be18f6b JA |
424 | int ret; |
425 | ||
1db268db JA |
426 | /* |
427 | * Clamp CQ ring size at our SQ ring size, we don't need more entries | |
428 | * than that. | |
429 | */ | |
430 | p->flags |= IORING_SETUP_CQSIZE; | |
431 | p->cq_entries = entries; | |
432 | ||
2be18f6b JA |
433 | p->flags |= IORING_SETUP_COOP_TASKRUN; |
434 | p->flags |= IORING_SETUP_SINGLE_ISSUER; | |
435 | p->flags |= IORING_SETUP_DEFER_TASKRUN; | |
436 | retry: | |
437 | ret = syscall(__NR_io_uring_setup, entries, p); | |
438 | if (!ret) | |
439 | return 0; | |
440 | ||
441 | if (errno == EINVAL && p->flags & IORING_SETUP_COOP_TASKRUN) { | |
442 | p->flags &= ~IORING_SETUP_COOP_TASKRUN; | |
443 | goto retry; | |
444 | } | |
445 | if (errno == EINVAL && p->flags & IORING_SETUP_SINGLE_ISSUER) { | |
446 | p->flags &= ~IORING_SETUP_SINGLE_ISSUER; | |
447 | goto retry; | |
448 | } | |
449 | if (errno == EINVAL && p->flags & IORING_SETUP_DEFER_TASKRUN) { | |
450 | p->flags &= ~IORING_SETUP_DEFER_TASKRUN; | |
451 | goto retry; | |
452 | } | |
453 | ||
454 | return ret; | |
c9fb4c5b JA |
455 | } |
456 | ||
b3915995 JA |
457 | static void io_uring_probe(int fd) |
458 | { | |
459 | struct io_uring_probe *p; | |
460 | int ret; | |
461 | ||
223decdd | 462 | p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op)); |
b3915995 JA |
463 | if (!p) |
464 | return; | |
465 | ||
b3915995 JA |
466 | ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256); |
467 | if (ret < 0) | |
468 | goto out; | |
469 | ||
470 | if (IORING_OP_READ > p->ops_len) | |
471 | goto out; | |
472 | ||
473 | if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED)) | |
84106576 | 474 | vectored = 0; |
b3915995 JA |
475 | out: |
476 | free(p); | |
477 | } | |
478 | ||
c3e2fc25 JA |
/*
 * Thin wrapper around io_uring_enter(2) for this submitter's ring.
 * Adds IORING_ENTER_REGISTERED_RING when the ring fd was registered
 * (enter_ring_fd is then the registered index). Uses the arch-optimized
 * direct syscall path when the build provides one.
 */
static int io_uring_enter(struct submitter *s, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	if (register_ring)
		flags |= IORING_ENTER_REGISTERED_RING;
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
				min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, s->enter_ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#endif
}
492 | ||
701d1277 JA |
493 | static unsigned file_depth(struct submitter *s) |
494 | { | |
e39863e3 | 495 | return (depth + s->nr_files - 1) / s->nr_files; |
701d1277 JA |
496 | } |
497 | ||
501565a1 JA |
498 | static unsigned long long get_offset(struct submitter *s, struct file *f) |
499 | { | |
500 | unsigned long long offset; | |
501 | long r; | |
502 | ||
503 | if (random_io) { | |
4d9521fd JA |
504 | unsigned long long block; |
505 | ||
501565a1 | 506 | r = __rand64(&s->rand_state); |
4d9521fd JA |
507 | block = r % f->max_blocks; |
508 | offset = block * (unsigned long long) bs; | |
501565a1 JA |
509 | } else { |
510 | offset = f->cur_off; | |
511 | f->cur_off += bs; | |
512 | if (f->cur_off + bs > f->max_size) | |
513 | f->cur_off = 0; | |
514 | } | |
515 | ||
516 | return offset; | |
517 | } | |
518 | ||
4ad09b56 | 519 | static struct file *init_new_io(struct submitter *s) |
c9fb4c5b | 520 | { |
a7086591 | 521 | struct file *f; |
c9fb4c5b | 522 | |
701d1277 JA |
523 | if (s->nr_files == 1) { |
524 | f = &s->files[0]; | |
525 | } else { | |
526 | f = &s->files[s->cur_file]; | |
527 | if (f->pending_ios >= file_depth(s)) { | |
528 | s->cur_file++; | |
529 | if (s->cur_file == s->nr_files) | |
530 | s->cur_file = 0; | |
93d1811c | 531 | f = &s->files[s->cur_file]; |
701d1277 JA |
532 | } |
533 | } | |
4ad09b56 | 534 | |
701d1277 | 535 | f->pending_ios++; |
4ad09b56 JA |
536 | return f; |
537 | } | |
538 | ||
/*
 * Fill the SQE at 'index' with the next read. Nop workloads only set the
 * opcode. Otherwise the target file is chosen, the fixed or real fd is
 * selected, and the opcode is READ_FIXED / READ / READV depending on the
 * fixedbufs and vectored settings. The file number goes in the low 32
 * bits of user_data; while stats collection is live, the clock batch
 * index goes in the high 32 bits so the reap side can compute latency.
 */
static void init_io(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index];
	struct file *f;

	if (do_nop) {
		sqe->opcode = IORING_OP_NOP;
		return;
	}

	f = init_new_io(s);

	if (register_files) {
		sqe->flags = IOSQE_FIXED_FILE;
		sqe->fd = f->fixed_fd;
	} else {
		sqe->flags = 0;
		sqe->fd = f->real_fd;
	}
	if (fixedbufs) {
		/* buffer was registered; address must match the iovec slot */
		sqe->opcode = IORING_OP_READ_FIXED;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = index;
	} else if (!vectored) {
		/* kernel supports the plain READ opcode */
		sqe->opcode = IORING_OP_READ;
		sqe->addr = (unsigned long) s->iovecs[index].iov_base;
		sqe->len = bs;
		sqe->buf_index = 0;
	} else {
		/* fallback: single-segment readv */
		sqe->opcode = IORING_OP_READV;
		sqe->addr = (unsigned long) &s->iovecs[index];
		sqe->len = 1;
		sqe->buf_index = 0;
	}
	sqe->ioprio = 0;
	sqe->off = get_offset(s, f);
	sqe->user_data = (unsigned long) f->fileno;
	if (stats && stats_running)
		sqe->user_data |= ((uint64_t)s->clock_index << 32);
}
580 | ||
7d04588a AG |
/*
 * Fill the SQE at 'index' with an NVMe passthrough read (uring_cmd).
 * The ring was created with IORING_SETUP_SQE128, so each logical slot
 * occupies two normal SQEs — hence the 'index << 1' addressing. The
 * NVMe command itself (opcode 0x2 = read) lives in the SQE's extended
 * cmd area; slba/nlb are derived from the byte offset via lba_shift.
 * user_data encoding matches init_io(): fileno low, clock index high.
 */
static void init_io_pt(struct submitter *s, unsigned index)
{
	struct io_uring_sqe *sqe = &s->sqes[index << 1];
	unsigned long offset;
	struct file *f;
	struct nvme_uring_cmd *cmd;
	unsigned long long slba;
	unsigned long long nlb;

	f = init_new_io(s);

	offset = get_offset(s, f);

	if (register_files) {
		sqe->fd = f->fixed_fd;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->real_fd;
		sqe->flags = 0;
	}
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) f->fileno;
	if (stats)
		sqe->user_data |= ((__u64) s->clock_index << 32ULL);
	sqe->cmd_op = NVME_URING_CMD_IO;
	slba = offset >> f->lba_shift;
	/* nlb is a 0's-based count: 0 means one LBA */
	nlb = (bs >> f->lba_shift) - 1;
	cmd = (struct nvme_uring_cmd *)&sqe->cmd;
	/* cdw10 and cdw11 represent starting slba*/
	cmd->cdw10 = slba & 0xffffffff;
	cmd->cdw11 = slba >> 32;
	/* cdw12 represent number of lba to be read*/
	cmd->cdw12 = nlb;
	cmd->addr = (unsigned long) s->iovecs[index].iov_base;
	cmd->data_len = bs;
	if (fixedbufs) {
		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = index;
	}
	cmd->nsid = f->nsid;
	cmd->opcode = 2;
}
623 | ||
/*
 * Queue up to 'max_ios' new submissions into the SQ ring, stopping early
 * when the ring is full. The head is read with acquire semantics when a
 * kernel SQPOLL thread may be consuming entries concurrently; otherwise
 * a plain load suffices (only the kernel writes head via syscall
 * context). The new tail is published with release semantics so the
 * kernel observes fully initialized SQEs. Returns the number prepared.
 */
static int prep_more_ios_uring(struct submitter *s, int max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned head, index, tail, next_tail, prepped = 0;

	if (sq_thread_poll)
		head = atomic_load_acquire(ring->head);
	else
		head = *ring->head;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		/* ring full: tail would catch up with head */
		if (next_tail == head)
			break;

		index = tail & sq_ring_mask;
		if (pt)
			init_io_pt(s, index);
		else
			init_io(s, index);
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (prepped)
		atomic_store_release(ring->tail, tail);
	return prepped;
}
653 | ||
/*
 * Fill in f->max_blocks / f->max_size (and NVMe geometry for
 * passthrough) from the file's type: NVMe generic char device (pt),
 * block device, or regular file. Returns 0 on success, -1 on error or
 * unsupported file type.
 */
static int get_file_size(struct file *f)
{
	struct stat st;

	if (fstat(f->real_fd, &st) < 0)
		return -1;
	if (pt) {
		__u64 nlba;
		__u32 lbs;
		int ret;

		/* passthrough only works on the nvme-generic char device */
		if (!S_ISCHR(st.st_mode)) {
			fprintf(stderr, "passthrough works with only nvme-ns "
					"generic devices (/dev/ngXnY)\n");
			return -1;
		}
		ret = nvme_get_info(f->real_fd, &f->nsid, &lbs, &nlba);
		if (ret)
			return -1;
		if ((bs % lbs) != 0) {
			printf("error: bs:%d should be a multiple logical_block_size:%d\n",
					bs, lbs);
			return -1;
		}
		/*
		 * NOTE(review): both fields are set to the LBA count here,
		 * while the other branches store max_blocks in 'bs' units
		 * and max_size in bytes — verify sequential/random offset
		 * math for passthrough when bs != lbs.
		 */
		f->max_blocks = nlba;
		f->max_size = nlba;
		f->lba_shift = ilog2(lbs);
		return 0;
	} else if (S_ISBLK(st.st_mode)) {
		unsigned long long bytes;

		if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
			return -1;

		f->max_blocks = bytes / bs;
		f->max_size = bytes;
		return 0;
	} else if (S_ISREG(st.st_mode)) {
		f->max_blocks = st.st_size / bs;
		f->max_size = st.st_size;
		return 0;
	}

	return -1;
}
699 | ||
/*
 * Drain completed IOs from the CQ ring. For real IO each completion must
 * have transferred exactly 'bs' bytes; anything else is fatal (-1). The
 * owning file is found from the low 32 bits of user_data and its pending
 * count credited. With stats enabled, consecutive completions sharing a
 * clock index are batched into one add_stat() call. The new head is
 * published with release semantics. Returns the number reaped.
 */
static int reap_events_uring(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;
	int last_idx = -1, stat_nr = 0;

	head = *ring->head;
	do {
		struct file *f;

		/* acquire pairs with the kernel's tail publication */
		if (head == atomic_load_acquire(ring->tail))
			break;
		cqe = &ring->cqes[head & cq_ring_mask];
		if (!do_nop) {
			int fileno = cqe->user_data & 0xffffffff;

			f = &s->files[fileno];
			f->pending_ios--;
			if (cqe->res != bs) {
				printf("io: unexpected ret=%d\n", cqe->res);
				if (polled && cqe->res == -EOPNOTSUPP)
					printf("Your filesystem/driver/kernel doesn't support polled IO\n");
				return -1;
			}
		}
		if (stats) {
			int clock_index = cqe->user_data >> 32;

			/* flush the batch when the clock index changes */
			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
					stat_nr = 0;
				}
				last_idx = clock_index;
			}
			stat_nr++;
		}
		reaped++;
		head++;
	} while (1);

	/* flush the final batch */
	if (stat_nr)
		add_stat(s, last_idx, stat_nr);

	if (reaped) {
		s->inflight -= reaped;
		atomic_store_release(ring->head, head);
	}
	return reaped;
}
751 | ||
7d04588a AG |
/*
 * Passthrough variant of reap_events_uring(). The ring was created with
 * IORING_SETUP_CQE32, so each logical completion occupies two normal
 * CQEs — hence 'index << 1'. A uring_cmd completion reports NVMe status
 * in res: anything non-zero is fatal (-1). Stats batching and head
 * publication mirror the regular reap path. Returns the number reaped.
 */
static int reap_events_uring_pt(struct submitter *s)
{
	struct io_cq_ring *ring = &s->cq_ring;
	struct io_uring_cqe *cqe;
	unsigned head, reaped = 0;
	int last_idx = -1, stat_nr = 0;
	unsigned index;
	int fileno;

	head = *ring->head;
	do {
		struct file *f;

		if (head == atomic_load_acquire(ring->tail))
			break;
		index = head & cq_ring_mask;
		cqe = &ring->cqes[index << 1];
		fileno = cqe->user_data & 0xffffffff;
		f = &s->files[fileno];
		f->pending_ios--;

		if (cqe->res != 0) {
			printf("io: unexpected ret=%d\n", cqe->res);
			if (polled && cqe->res == -EINVAL)
				printf("passthrough doesn't support polled IO\n");
			return -1;
		}
		if (stats) {
			int clock_index = cqe->user_data >> 32;

			/* flush the batch when the clock index changes */
			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
					stat_nr = 0;
				}
				last_idx = clock_index;
			}
			stat_nr++;
		}
		reaped++;
		head++;
	} while (1);

	/* flush the final batch */
	if (stat_nr)
		add_stat(s, last_idx, stat_nr);

	if (reaped) {
		s->inflight -= reaped;
		atomic_store_release(ring->head, head);
	}
	return reaped;
}
804 | ||
4b9e13dc JA |
/*
 * Bind the submitter to the NUMA node of its device: prefer allocations
 * from that node and restrict the thread's CPU affinity to the node's
 * CPUs. No-op when libnuma is not built in or no node was detected.
 *
 * Fix: the cpumask from numa_allocate_cpumask() was never released,
 * leaking one mask per submitter thread.
 */
static void set_affinity(struct submitter *s)
{
#ifdef CONFIG_LIBNUMA
	struct bitmask *mask;

	if (s->numa_node == -1)
		return;

	numa_set_preferred(s->numa_node);

	mask = numa_allocate_cpumask();
	numa_node_to_cpus(s->numa_node, mask);
	numa_sched_setaffinity(s->tid, mask);
	numa_free_cpumask(mask);
#endif
}
820 | ||
821 | static int detect_node(struct submitter *s, const char *name) | |
822 | { | |
823 | #ifdef CONFIG_LIBNUMA | |
824 | const char *base = basename(name); | |
825 | char str[128]; | |
826 | int ret, fd, node; | |
827 | ||
cc791a99 JA |
828 | if (pt) |
829 | sprintf(str, "/sys/class/nvme-generic/%s/device/numa_node", base); | |
830 | else | |
831 | sprintf(str, "/sys/block/%s/device/numa_node", base); | |
4b9e13dc JA |
832 | fd = open(str, O_RDONLY); |
833 | if (fd < 0) | |
834 | return -1; | |
835 | ||
836 | ret = read(fd, str, sizeof(str)); | |
837 | if (ret < 0) { | |
838 | close(fd); | |
839 | return -1; | |
840 | } | |
841 | node = atoi(str); | |
842 | s->numa_node = node; | |
843 | close(fd); | |
844 | #else | |
845 | s->numa_node = -1; | |
846 | #endif | |
847 | return 0; | |
848 | } | |
849 | ||
/*
 * Prepare the submitter for legacy libaio: disable the options libaio
 * cannot honor (polling, SQPOLL, nop commands, registered files or
 * buffers), then initialize the aio context sized to the power-of-2
 * rounded depth. Returns io_queue_init()'s result, or -1 with errno set
 * to EINVAL when libaio support is not compiled in.
 *
 * Fix: the do_nop diagnostic previously repeated the polled-IO message
 * ("aio does not support polled IO") — a copy/paste error.
 */
static int setup_aio(struct submitter *s)
{
#ifdef CONFIG_LIBAIO
	if (polled) {
		fprintf(stderr, "aio does not support polled IO\n");
		polled = 0;
	}
	if (sq_thread_poll) {
		fprintf(stderr, "aio does not support SQPOLL IO\n");
		sq_thread_poll = 0;
	}
	if (do_nop) {
		fprintf(stderr, "aio does not support nop\n");
		do_nop = 0;
	}
	if (fixedbufs || register_files) {
		fprintf(stderr, "aio does not support registered files or buffers\n");
		fixedbufs = register_files = 0;
	}

	return io_queue_init(roundup_pow2(depth), &s->aio_ctx);
#else
	fprintf(stderr, "Legacy AIO not available on this system/build\n");
	errno = EINVAL;
	return -1;
#endif
}
877 | ||
/*
 * Create and map this submitter's io_uring: set up the ring with the
 * requested feature flags, optionally register buffers and files, then
 * mmap the SQ ring, SQE array and CQ ring into our address space and
 * wire up the struct io_sq_ring/io_cq_ring pointer views. SQE128/CQE32
 * double the per-entry sizes for NVMe passthrough. Returns 0 on
 * success, 1 on failure.
 */
static int setup_ring(struct submitter *s)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;
	int ret, fd, i;
	void *ptr;
	size_t len;

	memset(&p, 0, sizeof(p));

	if (polled && !do_nop)
		p.flags |= IORING_SETUP_IOPOLL;
	if (sq_thread_poll) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (sq_thread_cpu != -1) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = sq_thread_cpu;
		}
	}
	if (pt) {
		/* passthrough needs the big SQE/CQE formats */
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;
	}

	fd = io_uring_setup(depth, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	s->ring_fd = s->enter_ring_fd = fd;

	io_uring_probe(fd);

	if (fixedbufs) {
		struct rlimit rlim;

		rlim.rlim_cur = RLIM_INFINITY;
		rlim.rlim_max = RLIM_INFINITY;
		/* ignore potential error, not needed on newer kernels */
		setrlimit(RLIMIT_MEMLOCK, &rlim);

		ret = io_uring_register_buffers(s);
		if (ret < 0) {
			perror("io_uring_register_buffers");
			return 1;
		}
	}

	if (register_files) {
		ret = io_uring_register_files(s);
		if (ret < 0) {
			perror("io_uring_register_files");
			return 1;
		}
	}

	/*
	 * NOTE(review): the mmap() results below are not checked against
	 * MAP_FAILED — a failure would fault on first ring access.
	 */
	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQ_RING);
	sring->head = ptr + p.sq_off.head;
	sring->tail = ptr + p.sq_off.tail;
	sring->ring_mask = ptr + p.sq_off.ring_mask;
	sring->ring_entries = ptr + p.sq_off.ring_entries;
	sring->flags = ptr + p.sq_off.flags;
	sring->array = ptr + p.sq_off.array;
	sq_ring_mask = *sring->ring_mask;

	/* SQE128 doubles the size of each SQE */
	if (p.flags & IORING_SETUP_SQE128)
		len = 2 * p.sq_entries * sizeof(struct io_uring_sqe);
	else
		len = p.sq_entries * sizeof(struct io_uring_sqe);
	s->sqes = mmap(0, len,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);

	/* CQE32 doubles the size of each CQE */
	if (p.flags & IORING_SETUP_CQE32) {
		len = p.cq_off.cqes +
			2 * p.cq_entries * sizeof(struct io_uring_cqe);
	} else {
		len = p.cq_off.cqes +
			p.cq_entries * sizeof(struct io_uring_cqe);
	}
	ptr = mmap(0, len,
			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_CQ_RING);
	cring->head = ptr + p.cq_off.head;
	cring->tail = ptr + p.cq_off.tail;
	cring->ring_mask = ptr + p.cq_off.ring_mask;
	cring->ring_entries = ptr + p.cq_off.ring_entries;
	cring->cqes = ptr + p.cq_off.cqes;
	cq_ring_mask = *cring->ring_mask;

	/* identity-map the indirection array: slot i submits sqe i */
	for (i = 0; i < p.sq_entries; i++)
		sring->array[i] = i;

	return 0;
}
976 | ||
977 | static void *allocate_mem(struct submitter *s, int size) | |
978 | { | |
979 | void *buf; | |
980 | ||
981 | #ifdef CONFIG_LIBNUMA | |
982 | if (s->numa_node != -1) | |
983 | return numa_alloc_onnode(size, s->numa_node); | |
984 | #endif | |
985 | ||
c409e4c2 | 986 | if (posix_memalign(&buf, t_io_uring_page_size, bs)) { |
4b9e13dc JA |
987 | printf("failed alloc\n"); |
988 | return NULL; | |
989 | } | |
990 | ||
991 | return buf; | |
992 | } | |
993 | ||
/*
 * Per-thread setup run at the start of each submitter thread.
 *
 * Seeds the RNGs, pins the thread (set_affinity), allocates one IO buffer
 * per queue slot, and initializes whichever engine is selected (sync,
 * io_uring, or libaio). Returns the stats batch count (>= 0) on success,
 * -1 on any setup failure.
 */
static int submitter_init(struct submitter *s)
{
	int i, nr_batch, err;
	static int init_printed;	/* print global config only once across threads */
	char buf[80];
	s->tid = gettid();
	printf("submitter=%d, tid=%d, file=%s, node=%d\n", s->index, s->tid,
							s->filename, s->numa_node);

	set_affinity(s);

	/* seed both the private 64-bit RNG and drand48 from the tid */
	__init_rand64(&s->rand_state, s->tid);
	srand48(s->tid);

	for (i = 0; i < MAX_FDS; i++)
		s->files[i].fileno = i;

	/* one buffer per slot; queue depth is rounded up to a power of two */
	for (i = 0; i < roundup_pow2(depth); i++) {
		void *buf;

		buf = allocate_mem(s, bs);
		if (!buf)
			return -1;
		s->iovecs[i].iov_base = buf;
		s->iovecs[i].iov_len = bs;
	}

	if (use_sync) {
		sprintf(buf, "Engine=preadv2\n");
		err = 0;
	} else if (!aio) {
		err = setup_ring(s);
		/* only describe the ring if setup succeeded */
		if (!err)
			sprintf(buf, "Engine=io_uring, sq_ring=%d, cq_ring=%d\n", *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
	} else {
		sprintf(buf, "Engine=aio\n");
		err = setup_aio(s);
	}
	if (err) {
		printf("queue setup failed: %s, %d\n", strerror(errno), err);
		return -1;
	}

	if (!init_printed) {
		printf("polled=%d, fixedbufs=%d, register_files=%d, buffered=%d, QD=%d\n", polled, fixedbufs, register_files, buffered, depth);
		printf("%s", buf);
		init_printed = 1;
	}

	if (stats) {
		/*
		 * Ring of submit timestamps; sized to a power of two (min 2)
		 * so the index can wrap with a mask in the hot path.
		 */
		nr_batch = roundup_pow2(depth / batch_submit);
		if (nr_batch < 2)
			nr_batch = 2;
		s->clock_batch = calloc(nr_batch, sizeof(unsigned long));
		s->clock_index = 1;

		s->plat = calloc(PLAT_NR, sizeof(unsigned long));
	} else {
		s->clock_batch = NULL;
		s->plat = NULL;
		nr_batch = 0;
	}
	/* perform the expensive command initialization part for passthrough here
	 * rather than in the fast path
	 */
	if (pt) {
		for (i = 0; i < roundup_pow2(depth); i++) {
			/* SQE128 mode: each command occupies two sqe slots */
			struct io_uring_sqe *sqe = &s->sqes[i << 1];

			memset(&sqe->cmd, 0, sizeof(struct nvme_uring_cmd));
		}
	}
	return nr_batch;
}
1068 | ||
1069 | #ifdef CONFIG_LIBAIO | |
1070 | static int prep_more_ios_aio(struct submitter *s, int max_ios, struct iocb *iocbs) | |
1071 | { | |
8310c570 | 1072 | uint64_t data; |
256714ea JA |
1073 | struct file *f; |
1074 | unsigned index; | |
256714ea JA |
1075 | |
1076 | index = 0; | |
1077 | while (index < max_ios) { | |
1078 | struct iocb *iocb = &iocbs[index]; | |
1079 | ||
4ad09b56 | 1080 | f = init_new_io(s); |
256714ea | 1081 | |
256714ea | 1082 | io_prep_pread(iocb, f->real_fd, s->iovecs[index].iov_base, |
501565a1 | 1083 | s->iovecs[index].iov_len, get_offset(s, f)); |
256714ea JA |
1084 | |
1085 | data = f->fileno; | |
52479d8b | 1086 | if (stats && stats_running) |
8310c570 | 1087 | data |= (((uint64_t) s->clock_index) << 32); |
256714ea JA |
1088 | iocb->data = (void *) (uintptr_t) data; |
1089 | index++; | |
1090 | } | |
1091 | return index; | |
1092 | } | |
1093 | ||
/*
 * Consume 'evs' completed libaio events. Decodes the tag packed by
 * prep_more_ios_aio (file index low 32 bits, clock batch index high 32),
 * accumulates latency stats per batch index, and updates the in-flight
 * and done counters. Returns the number of events reaped, or -1 if a
 * completion returned a short/failed read.
 */
static int reap_events_aio(struct submitter *s, struct io_event *events, int evs)
{
	int last_idx = -1, stat_nr = 0;
	int reaped = 0;

	while (evs) {
		uint64_t data = (uintptr_t) events[reaped].data;
		struct file *f = &s->files[data & 0xffffffff];

		f->pending_ios--;
		/* anything other than a full block read is treated as fatal */
		if (events[reaped].res != bs) {
			printf("io: unexpected ret=%ld\n", events[reaped].res);
			return -1;
		}
		if (stats) {
			int clock_index = data >> 32;

			/*
			 * Completions for the same submit batch are contiguous
			 * in practice; batch them into a single add_stat call
			 * and flush whenever the batch index changes.
			 */
			if (last_idx != clock_index) {
				if (last_idx != -1) {
					add_stat(s, last_idx, stat_nr);
					stat_nr = 0;
				}
				last_idx = clock_index;
			}
			stat_nr++;
		}
		reaped++;
		evs--;
	}

	/* flush the trailing stats batch, if any */
	if (stat_nr)
		add_stat(s, last_idx, stat_nr);

	s->inflight -= reaped;
	s->done += reaped;
	return reaped;
}
1131 | ||
1132 | static void *submitter_aio_fn(void *data) | |
1133 | { | |
1134 | struct submitter *s = data; | |
71989c1b | 1135 | int i, ret, prepped; |
256714ea JA |
1136 | struct iocb **iocbsptr; |
1137 | struct iocb *iocbs; | |
1138 | struct io_event *events; | |
71989c1b | 1139 | #ifdef ARCH_HAVE_CPU_CLOCK |
ae7ea7a4 JA |
1140 | int nr_batch; |
1141 | #endif | |
1142 | ||
1143 | ret = submitter_init(s); | |
1144 | if (ret < 0) | |
1145 | goto done; | |
1146 | ||
1147 | #ifdef ARCH_HAVE_CPU_CLOCK | |
1148 | nr_batch = ret; | |
71989c1b | 1149 | #endif |
256714ea JA |
1150 | |
1151 | iocbsptr = calloc(depth, sizeof(struct iocb *)); | |
1152 | iocbs = calloc(depth, sizeof(struct iocb)); | |
1153 | events = calloc(depth, sizeof(struct io_event)); | |
1154 | ||
1155 | for (i = 0; i < depth; i++) | |
1156 | iocbsptr[i] = &iocbs[i]; | |
1157 | ||
1158 | prepped = 0; | |
1159 | do { | |
1160 | int to_wait, to_submit, to_prep; | |
1161 | ||
1162 | if (!prepped && s->inflight < depth) { | |
1163 | to_prep = min(depth - s->inflight, batch_submit); | |
1164 | prepped = prep_more_ios_aio(s, to_prep, iocbs); | |
1165 | #ifdef ARCH_HAVE_CPU_CLOCK | |
1166 | if (prepped && stats) { | |
1167 | s->clock_batch[s->clock_index] = get_cpu_clock(); | |
1168 | s->clock_index = (s->clock_index + 1) & (nr_batch - 1); | |
1169 | } | |
1170 | #endif | |
1171 | } | |
1172 | s->inflight += prepped; | |
1173 | to_submit = prepped; | |
1174 | ||
1175 | if (to_submit && (s->inflight + to_submit <= depth)) | |
1176 | to_wait = 0; | |
1177 | else | |
1178 | to_wait = min(s->inflight + to_submit, batch_complete); | |
1179 | ||
1180 | ret = io_submit(s->aio_ctx, to_submit, iocbsptr); | |
1181 | s->calls++; | |
1182 | if (ret < 0) { | |
1183 | perror("io_submit"); | |
1184 | break; | |
1185 | } else if (ret != to_submit) { | |
1186 | printf("submitted %d, wanted %d\n", ret, to_submit); | |
1187 | break; | |
1188 | } | |
1189 | prepped = 0; | |
1190 | ||
24a24c12 | 1191 | while (to_wait) { |
256714ea JA |
1192 | int r; |
1193 | ||
24a24c12 JA |
1194 | s->calls++; |
1195 | r = io_getevents(s->aio_ctx, to_wait, to_wait, events, NULL); | |
1196 | if (r < 0) { | |
1197 | perror("io_getevents"); | |
1198 | break; | |
1199 | } else if (r != to_wait) { | |
1200 | printf("r=%d, wait=%d\n", r, to_wait); | |
1201 | break; | |
1202 | } | |
1203 | r = reap_events_aio(s, events, r); | |
1204 | s->reaps += r; | |
1205 | to_wait -= r; | |
256714ea JA |
1206 | } |
1207 | } while (!s->finish); | |
1208 | ||
1209 | free(iocbsptr); | |
1210 | free(iocbs); | |
1211 | free(events); | |
ae7ea7a4 | 1212 | done: |
256714ea JA |
1213 | finish = 1; |
1214 | return NULL; | |
1215 | } | |
1216 | #endif | |
1217 | ||
ca8c91c5 JA |
1218 | static void io_uring_unregister_ring(struct submitter *s) |
1219 | { | |
1220 | struct io_uring_rsrc_update up = { | |
1221 | .offset = s->enter_ring_fd, | |
1222 | }; | |
1223 | ||
1224 | syscall(__NR_io_uring_register, s->ring_fd, IORING_UNREGISTER_RING_FDS, | |
1225 | &up, 1); | |
1226 | } | |
1227 | ||
1228 | static int io_uring_register_ring(struct submitter *s) | |
1229 | { | |
1230 | struct io_uring_rsrc_update up = { | |
1231 | .data = s->ring_fd, | |
1232 | .offset = -1U, | |
1233 | }; | |
1234 | int ret; | |
1235 | ||
1236 | ret = syscall(__NR_io_uring_register, s->ring_fd, | |
1237 | IORING_REGISTER_RING_FDS, &up, 1); | |
1238 | if (ret == 1) { | |
1239 | s->enter_ring_fd = up.offset; | |
1240 | return 0; | |
1241 | } | |
1242 | register_ring = 0; | |
1243 | return -1; | |
1244 | } | |
1245 | ||
256714ea JA |
/*
 * Thread entry point for the io_uring engine.
 *
 * Core loop: prepare a batch of SQEs, submit via io_uring_enter (or let
 * the SQPOLL kernel thread pick them up), then reap completions. Partial
 * submissions are retried via the submit_more/submit labels. Sets the
 * global 'finish' flag on exit.
 */
static void *submitter_uring_fn(void *data)
{
	struct submitter *s = data;
	struct io_sq_ring *ring = &s->sq_ring;
	int ret, prepped;
#ifdef ARCH_HAVE_CPU_CLOCK
	int nr_batch;
#endif

	ret = submitter_init(s);
	if (ret < 0)
		goto done;

#ifdef ARCH_HAVE_CPU_CLOCK
	nr_batch = ret;
#endif

	if (register_ring)
		io_uring_register_ring(s);

	prepped = 0;
	do {
		int to_wait, to_submit, this_reap, to_prep;
		unsigned ring_flags = 0;

		/* top up the SQ ring with a fresh batch of requests */
		if (!prepped && s->inflight < depth) {
			to_prep = min(depth - s->inflight, batch_submit);
			prepped = prep_more_ios_uring(s, to_prep);
#ifdef ARCH_HAVE_CPU_CLOCK
			/* timestamp this batch for latency accounting */
			if (prepped && stats) {
				s->clock_batch[s->clock_index] = get_cpu_clock();
				s->clock_index = (s->clock_index + 1) & (nr_batch - 1);
			}
#endif
		}
		s->inflight += prepped;
submit_more:
		to_submit = prepped;
submit:
		/* only wait for completions if the queue would overfill */
		if (to_submit && (s->inflight + to_submit <= depth))
			to_wait = 0;
		else
			to_wait = min(s->inflight + to_submit, batch_complete);

		/*
		 * Only need to call io_uring_enter if we're not using SQ thread
		 * poll, or if IORING_SQ_NEED_WAKEUP is set.
		 */
		if (sq_thread_poll)
			ring_flags = atomic_load_acquire(ring->flags);
		if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) {
			unsigned flags = 0;

			if (to_wait)
				flags = IORING_ENTER_GETEVENTS;
			if (ring_flags & IORING_SQ_NEED_WAKEUP)
				flags |= IORING_ENTER_SQ_WAKEUP;
			ret = io_uring_enter(s, to_submit, to_wait, flags);
			s->calls++;
		} else {
			/* for SQPOLL, we submitted it all effectively */
			ret = to_submit;
		}

		/*
		 * For non SQ thread poll, we already got the events we needed
		 * through the io_uring_enter() above. For SQ thread poll, we
		 * need to loop here until we find enough events.
		 */
		this_reap = 0;
		do {
			int r;

			if (pt)
				r = reap_events_uring_pt(s);
			else
				r = reap_events_uring(s);
			if (r == -1) {
				/* fatal completion error; stop this thread */
				s->finish = 1;
				break;
			} else if (r > 0)
				this_reap += r;
		} while (sq_thread_poll && this_reap < to_wait);
		s->reaps += this_reap;

		if (ret >= 0) {
			if (!ret) {
				/* nothing submitted; wait on what's in flight */
				to_submit = 0;
				if (s->inflight)
					goto submit;
				continue;
			} else if (ret < to_submit) {
				/* partial submit; retry the remainder */
				int diff = to_submit - ret;

				s->done += ret;
				prepped -= diff;
				goto submit_more;
			}
			s->done += ret;
			prepped = 0;
			continue;
		} else if (ret < 0) {
			if (errno == EAGAIN) {
				if (s->finish)
					break;
				if (this_reap)
					goto submit;
				/* queue full; just wait for completions */
				to_submit = 0;
				goto submit;
			}
			printf("io_submit: %s\n", strerror(errno));
			break;
		}
	} while (!s->finish);

	if (register_ring)
		io_uring_unregister_ring(s);

done:
	finish = 1;
	return NULL;
}
1368 | ||
a7648136 | 1369 | #ifdef CONFIG_PWRITEV2 |
379406bc JA |
/*
 * Thread entry point for the synchronous engine: one blocking preadv2 per
 * iteration (RWF_HIPRI when polled IO is requested), queue depth is
 * effectively 1. Sets the global 'finish' flag on exit.
 */
static void *submitter_sync_fn(void *data)
{
	struct submitter *s = data;
	int ret;

	if (submitter_init(s) < 0)
		goto done;

	do {
		uint64_t offset;
		struct file *f;

		f = init_new_io(s);

#ifdef ARCH_HAVE_CPU_CLOCK
		/* timestamp before issue; consumed by add_stat below */
		if (stats)
			s->clock_batch[s->clock_index] = get_cpu_clock();
#endif

		s->inflight++;
		s->calls++;

		offset = get_offset(s, f);
		if (polled)
			ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, RWF_HIPRI);
		else
			ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, 0);

		/* error or short read both terminate the loop */
		if (ret < 0) {
			perror("preadv2");
			break;
		} else if (ret != bs) {
			break;
		}

		s->done++;
		s->inflight--;
		f->pending_ios--;
		if (stats)
			add_stat(s, s->clock_index, 1);
	} while (!s->finish);

done:
	finish = 1;
	return NULL;
}
a7648136 JA |
1416 | #else |
/*
 * Stub used when preadv2 is unavailable (CONFIG_PWRITEV2 not set): the
 * -S option already exits in main(), so this only has to terminate the
 * run cleanly if it is ever reached.
 */
static void *submitter_sync_fn(void *data)
{
	finish = 1;
	return NULL;
}
1422 | #endif | |
379406bc | 1423 | |
54319661 JA |
1424 | static struct submitter *get_submitter(int offset) |
1425 | { | |
1426 | void *ret; | |
1427 | ||
1428 | ret = submitter; | |
1429 | if (offset) | |
1430 | ret += offset * (sizeof(*submitter) + depth * sizeof(struct iovec)); | |
1431 | return ret; | |
1432 | } | |
1433 | ||
65e1a5e8 | 1434 | static void do_finish(const char *reason) |
c9fb4c5b | 1435 | { |
54319661 | 1436 | int j; |
4b9e13dc | 1437 | |
65e1a5e8 | 1438 | printf("Exiting on %s\n", reason); |
54319661 JA |
1439 | for (j = 0; j < nthreads; j++) { |
1440 | struct submitter *s = get_submitter(j); | |
1441 | s->finish = 1; | |
1442 | } | |
4b9e13dc JA |
1443 | if (max_iops > 1000000) { |
1444 | double miops = (double) max_iops / 1000000.0; | |
1445 | printf("Maximum IOPS=%.2fM\n", miops); | |
1446 | } else if (max_iops > 100000) { | |
1447 | double kiops = (double) max_iops / 1000.0; | |
1448 | printf("Maximum IOPS=%.2fK\n", kiops); | |
1449 | } else { | |
18b557a0 | 1450 | printf("Maximum IOPS=%lu\n", max_iops); |
4b9e13dc | 1451 | } |
c9fb4c5b JA |
1452 | finish = 1; |
1453 | } | |
1454 | ||
65e1a5e8 EV |
/*
 * SIGINT handler: trigger a clean shutdown.
 *
 * NOTE(review): do_finish calls printf, which is not async-signal-safe;
 * acceptable for a benchmark tool, but worth knowing.
 */
static void sig_int(int sig)
{
	do_finish("signal");
}
1459 | ||
c9fb4c5b JA |
1460 | static void arm_sig_int(void) |
1461 | { | |
1462 | struct sigaction act; | |
1463 | ||
1464 | memset(&act, 0, sizeof(act)); | |
1465 | act.sa_handler = sig_int; | |
1466 | act.sa_flags = SA_RESTART; | |
1467 | sigaction(SIGINT, &act, NULL); | |
2cf71009 BP |
1468 | |
1469 | /* Windows uses SIGBREAK as a quit signal from other applications */ | |
1470 | #ifdef WIN32 | |
1471 | sigaction(SIGBREAK, &act, NULL); | |
1472 | #endif | |
c9fb4c5b JA |
1473 | } |
1474 | ||
/*
 * Print the option summary (with current defaults substituted) and exit
 * with 'status'. 'argv' is the program name (argv[0]); the format
 * arguments must stay in exact positional sync with the %-specifiers.
 */
static void usage(char *argv, int status)
{
	char runtime_str[16];
	snprintf(runtime_str, sizeof(runtime_str), "%d", runtime);
	printf("%s [options] -- [filenames]\n"
		" -d <int> : IO Depth, default %d\n"
		" -s <int> : Batch submit, default %d\n"
		" -c <int> : Batch complete, default %d\n"
		" -b <int> : Block size, default %d\n"
		" -p <bool> : Polled IO, default %d\n"
		" -B <bool> : Fixed buffers, default %d\n"
		" -F <bool> : Register files, default %d\n"
		" -n <int> : Number of threads, default %d\n"
		" -O <bool> : Use O_DIRECT, default %d\n"
		" -N <bool> : Perform just no-op requests, default %d\n"
		" -t <bool> : Track IO latencies, default %d\n"
		" -T <int> : TSC rate in HZ\n"
		" -r <int> : Runtime in seconds, default %s\n"
		" -R <bool> : Use random IO, default %d\n"
		" -a <bool> : Use legacy aio, default %d\n"
		" -S <bool> : Use sync IO (preadv2), default %d\n"
		" -X <bool> : Use registered ring %d\n"
		" -P <bool> : Automatically place on device home node %d\n"
		" -u <bool> : Use nvme-passthrough I/O, default %d\n",
		argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled,
		fixedbufs, register_files, nthreads, !buffered, do_nop,
		stats, runtime == 0 ? "unlimited" : runtime_str, random_io, aio,
		use_sync, register_ring, numa_placement, pt);
	exit(status);
}
1505 | ||
203e4c26 JA |
1506 | static void read_tsc_rate(void) |
1507 | { | |
1508 | char buffer[32]; | |
1509 | int fd, ret; | |
1510 | ||
1511 | if (tsc_rate) | |
1512 | return; | |
1513 | ||
1514 | fd = open(TSC_RATE_FILE, O_RDONLY); | |
1515 | if (fd < 0) | |
1516 | return; | |
1517 | ||
1518 | ret = read(fd, buffer, sizeof(buffer)); | |
1519 | if (ret < 0) { | |
1520 | close(fd); | |
1521 | return; | |
1522 | } | |
1523 | ||
1524 | tsc_rate = strtoul(buffer, NULL, 10); | |
1525 | printf("Using TSC rate %luHz\n", tsc_rate); | |
1526 | close(fd); | |
1527 | } | |
1528 | ||
1529 | static void write_tsc_rate(void) | |
1530 | { | |
1531 | char buffer[32]; | |
1532 | struct stat sb; | |
1533 | int fd, ret; | |
1534 | ||
1535 | if (!stat(TSC_RATE_FILE, &sb)) | |
1536 | return; | |
1537 | ||
1538 | fd = open(TSC_RATE_FILE, O_WRONLY | O_CREAT, 0644); | |
1539 | if (fd < 0) | |
1540 | return; | |
1541 | ||
1542 | memset(buffer, 0, sizeof(buffer)); | |
1543 | sprintf(buffer, "%lu", tsc_rate); | |
1544 | ret = write(fd, buffer, strlen(buffer)); | |
1545 | if (ret < 0) | |
1546 | perror("write"); | |
1547 | close(fd); | |
1548 | } | |
1549 | ||
c9fb4c5b JA |
/*
 * Entry point: parse options, distribute the given files across submitter
 * threads, start one thread per submitter (sync / io_uring / libaio), and
 * print per-second throughput until the run finishes, then join threads
 * and dump latency percentiles if stats were enabled.
 */
int main(int argc, char *argv[])
{
	struct submitter *s;
	unsigned long done, calls, reap;
	int i, j, flags, fd, opt, threads_per_f, threads_rem = 0, nfiles;
	struct file f;
	void *ret;

	/* files are mandatory unless we're only doing no-op requests */
	if (!do_nop && argc < 2)
		usage(argv[0], 1);

	/*
	 * NOTE(review): the optstring accepts "D:" but no case handles it,
	 * so -D currently falls through to usage() — confirm intended.
	 */
	while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:t:T:a:r:D:R:X:S:P:u:h?")) != -1) {
		switch (opt) {
		case 'a':
			aio = !!atoi(optarg);
			break;
		case 'd':
			depth = atoi(optarg);
			break;
		case 's':
			batch_submit = atoi(optarg);
			if (!batch_submit)
				batch_submit = 1;
			break;
		case 'c':
			batch_complete = atoi(optarg);
			if (!batch_complete)
				batch_complete = 1;
			break;
		case 'b':
			bs = atoi(optarg);
			break;
		case 'p':
			polled = !!atoi(optarg);
			break;
		case 'B':
			fixedbufs = !!atoi(optarg);
			break;
		case 'F':
			register_files = !!atoi(optarg);
			break;
		case 'n':
			nthreads = atoi(optarg);
			if (!nthreads) {
				printf("Threads must be non-zero\n");
				usage(argv[0], 1);
			}
			break;
		case 'N':
			do_nop = !!atoi(optarg);
			break;
		case 'O':
			/* -O 1 means O_DIRECT, i.e. NOT buffered */
			buffered = !atoi(optarg);
			break;
		case 't':
#ifndef ARCH_HAVE_CPU_CLOCK
			fprintf(stderr, "Stats not supported on this CPU\n");
			return 1;
#endif
			stats = !!atoi(optarg);
			break;
		case 'T':
#ifndef ARCH_HAVE_CPU_CLOCK
			fprintf(stderr, "Stats not supported on this CPU\n");
			return 1;
#endif
			tsc_rate = strtoul(optarg, NULL, 10);
			write_tsc_rate();
			break;
		case 'r':
			runtime = atoi(optarg);
			break;
		case 'R':
			random_io = !!atoi(optarg);
			break;
		case 'X':
			register_ring = !!atoi(optarg);
			break;
		case 'S':
#ifdef CONFIG_PWRITEV2
			use_sync = !!atoi(optarg);
#else
			fprintf(stderr, "preadv2 not supported\n");
			exit(1);
#endif
			break;
		case 'P':
			numa_placement = !!atoi(optarg);
			break;
		case 'u':
			pt = !!atoi(optarg);
			break;
		case 'h':
		case '?':
		default:
			usage(argv[0], 0);
			break;
		}
	}

	if (stats)
		read_tsc_rate();

	/* batch sizes can never exceed the queue depth */
	if (batch_complete > depth)
		batch_complete = depth;
	if (batch_submit > depth)
		batch_submit = depth;

	/*
	 * One slab for all submitters: each element is a struct submitter
	 * followed by its iovec array (get_submitter does the striding).
	 */
	submitter = calloc(nthreads, sizeof(*submitter) +
				roundup_pow2(depth) * sizeof(struct iovec));
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		s->numa_node = -1;	/* -1 = no NUMA placement */
		s->index = j;
		s->done = s->calls = s->reaps = 0;
	}

	flags = O_RDONLY | O_NOATIME;
	if (!buffered)
		flags |= O_DIRECT;

	/* distribute the files round-robin across the submitter threads */
	j = 0;
	i = optind;
	nfiles = argc - i;
	if (!do_nop) {
		if (!nfiles) {
			printf("No files specified\n");
			usage(argv[0], 1);
		}
		threads_per_f = nthreads / nfiles;
		/* make sure each thread gets assigned files */
		if (threads_per_f == 0) {
			threads_per_f = 1;
		} else {
			threads_rem = nthreads - threads_per_f * nfiles;
		}
	}
	while (!do_nop && i < argc) {
		int k, limit;

		memset(&f, 0, sizeof(f));

		fd = open(argv[i], flags);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		f.real_fd = fd;
		if (get_file_size(&f)) {
			printf("failed getting size of device/file\n");
			return 1;
		}
		if (f.max_blocks <= 1) {
			printf("Zero file/device size?\n");
			return 1;
		}
		/* max_blocks becomes the largest valid block index */
		f.max_blocks--;

		/* early files absorb the remainder threads, one extra each */
		limit = threads_per_f;
		limit += threads_rem > 0 ? 1 : 0;
		for (k = 0; k < limit; k++) {
			s = get_submitter((j + k) % nthreads);

			if (s->nr_files == MAX_FDS) {
				printf("Max number of files (%d) reached\n", MAX_FDS);
				break;
			}

			memcpy(&s->files[s->nr_files], &f, sizeof(f));

			if (numa_placement)
				detect_node(s, argv[i]);

			s->filename = argv[i];
			s->nr_files++;
		}
		threads_rem--;
		i++;
		j += limit;
	}

	arm_sig_int();

	t_io_uring_page_size = sysconf(_SC_PAGESIZE);
	if (t_io_uring_page_size < 0)
		t_io_uring_page_size = 4096;

	/* launch one thread per submitter with the chosen engine */
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		if (use_sync)
			pthread_create(&s->thread, NULL, submitter_sync_fn, s);
		else if (!aio)
			pthread_create(&s->thread, NULL, submitter_uring_fn, s);
#ifdef CONFIG_LIBAIO
		else
			pthread_create(&s->thread, NULL, submitter_aio_fn, s);
#endif
	}

	/* once-a-second reporting loop; deltas against the previous tick */
	reap = calls = done = 0;
	do {
		unsigned long this_done = 0;
		unsigned long this_reap = 0;
		unsigned long this_call = 0;
		unsigned long rpc = 0, ipc = 0;
		unsigned long iops, bw;

		sleep(1);
		if (runtime && !--runtime)
			do_finish("timeout");

		/* don't print partial run, if interrupted by signal */
		if (finish)
			break;

		/* one second in to the run, enable stats */
		if (stats)
			stats_running = 1;

		for (j = 0; j < nthreads; j++) {
			s = get_submitter(j);
			this_done += s->done;
			this_call += s->calls;
			this_reap += s->reaps;
		}
		/* IOs submitted / reaped per syscall; -1 when no calls made */
		if (this_call - calls) {
			rpc = (this_done - done) / (this_call - calls);
			ipc = (this_reap - reap) / (this_call - calls);
		} else
			rpc = ipc = -1;
		iops = this_done - done;
		/* bandwidth in MiB/s; order avoids overflow for huge bs */
		if (bs > 1048576)
			bw = iops * (bs / 1048576);
		else
			bw = iops / (1048576 / bs);
		if (iops > 1000000) {
			double miops = (double) iops / 1000000.0;
			printf("IOPS=%.2fM, ", miops);
		} else if (iops > 100000) {
			double kiops = (double) iops / 1000.0;
			printf("IOPS=%.2fK, ", kiops);
		} else {
			printf("IOPS=%lu, ", iops);
		}
		max_iops = max(max_iops, iops);
		if (!do_nop) {
			if (bw > 2000) {
				double bw_g = (double) bw / 1000.0;

				printf("BW=%.2fGiB/s, ", bw_g);
			} else {
				printf("BW=%luMiB/s, ", bw);
			}
		}
		printf("IOS/call=%ld/%ld\n", rpc, ipc);
		done = this_done;
		calls = this_call;
		reap = this_reap;
	} while (!finish);

	/*
	 * Join all threads, then dump per-thread latency percentiles.
	 * NOTE(review): ring_fd is closed for every engine, including
	 * sync/aio where it may never have been set — confirm harmless.
	 */
	for (j = 0; j < nthreads; j++) {
		s = get_submitter(j);
		pthread_join(s->thread, &ret);
		close(s->ring_fd);

		if (stats) {
			unsigned long nr;

			printf("%d: Latency percentiles:\n", s->tid);
			for (i = 0, nr = 0; i < PLAT_NR; i++)
				nr += s->plat[i];
			show_clat_percentiles(s->plat, nr, 4);
			free(s->clock_batch);
			free(s->plat);
		}
	}

	free(submitter);
	return 0;
}