| 1 | #include <stdio.h> |
| 2 | #include <errno.h> |
| 3 | #include <assert.h> |
| 4 | #include <stdlib.h> |
| 5 | #include <stddef.h> |
| 6 | #include <signal.h> |
| 7 | #include <inttypes.h> |
| 8 | #include <math.h> |
| 9 | |
| 10 | #ifdef CONFIG_LIBAIO |
| 11 | #include <libaio.h> |
| 12 | #endif |
| 13 | |
| 14 | #ifdef CONFIG_LIBNUMA |
| 15 | #include <numa.h> |
| 16 | #endif |
| 17 | |
| 18 | #include <sys/types.h> |
| 19 | #include <sys/stat.h> |
| 20 | #include <sys/ioctl.h> |
| 21 | #include <sys/syscall.h> |
| 22 | #include <sys/resource.h> |
| 23 | #include <sys/mman.h> |
| 24 | #include <sys/uio.h> |
| 25 | #include <linux/fs.h> |
| 26 | #include <fcntl.h> |
| 27 | #include <unistd.h> |
| 28 | #include <string.h> |
| 29 | #include <pthread.h> |
| 30 | #include <sched.h> |
| 31 | #include <libgen.h> |
| 32 | |
| 33 | #include "../arch/arch.h" |
| 34 | #include "../os/os.h" |
| 35 | #include "../lib/types.h" |
| 36 | #include "../lib/roundup.h" |
| 37 | #include "../lib/rand.h" |
| 38 | #include "../minmax.h" |
| 39 | #include "../os/linux/io_uring.h" |
| 40 | #include "../engines/nvme.h" |
| 41 | |
| 42 | struct io_sq_ring { |
| 43 | unsigned *head; |
| 44 | unsigned *tail; |
| 45 | unsigned *ring_mask; |
| 46 | unsigned *ring_entries; |
| 47 | unsigned *flags; |
| 48 | unsigned *array; |
| 49 | }; |
| 50 | |
| 51 | struct io_cq_ring { |
| 52 | unsigned *head; |
| 53 | unsigned *tail; |
| 54 | unsigned *ring_mask; |
| 55 | unsigned *ring_entries; |
| 56 | struct io_uring_cqe *cqes; |
| 57 | }; |
| 58 | |
| 59 | #define DEPTH 128 |
| 60 | #define BATCH_SUBMIT 32 |
| 61 | #define BATCH_COMPLETE 32 |
| 62 | #define BS 4096 |
| 63 | |
| 64 | #define MAX_FDS 16 |
| 65 | |
| 66 | static unsigned sq_ring_mask, cq_ring_mask; |
| 67 | |
| 68 | struct file { |
| 69 | unsigned long max_blocks; |
| 70 | unsigned long max_size; |
| 71 | unsigned long cur_off; |
| 72 | unsigned pending_ios; |
| 73 | unsigned int nsid; /* nsid field required for nvme-passthrough */ |
| 74 | unsigned int lba_shift; /* lba_shift field required for nvme-passthrough */ |
| 75 | int real_fd; |
| 76 | int fixed_fd; |
| 77 | int fileno; |
| 78 | }; |
| 79 | |
| 80 | #define PLAT_BITS 6 |
| 81 | #define PLAT_VAL (1 << PLAT_BITS) |
| 82 | #define PLAT_GROUP_NR 29 |
| 83 | #define PLAT_NR (PLAT_GROUP_NR * PLAT_VAL) |
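
/*
 * Latency samples are binned into a log-linear histogram: PLAT_GROUP_NR
 * groups of PLAT_VAL linearly spaced buckets each, so the relative error of
 * any bucket is bounded by roughly 1/PLAT_VAL (~1.6% here). plat_val_to_idx()
 * and plat_idx_to_val() below implement the two directions of the mapping.
 */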
| 84 | |
| 85 | struct submitter { |
| 86 | pthread_t thread; |
| 87 | int ring_fd; |
| 88 | int enter_ring_fd; |
| 89 | int index; |
| 90 | struct io_sq_ring sq_ring; |
| 91 | struct io_uring_sqe *sqes; |
| 92 | struct io_cq_ring cq_ring; |
| 93 | int inflight; |
| 94 | int tid; |
| 95 | unsigned long reaps; |
| 96 | unsigned long done; |
| 97 | unsigned long calls; |
| 98 | unsigned long io_errors; |
| 99 | volatile int finish; |
| 100 | |
| 101 | __s32 *fds; |
| 102 | |
| 103 | struct taus258_state rand_state; |
| 104 | |
| 105 | unsigned long *clock_batch; |
| 106 | int clock_index; |
| 107 | unsigned long *plat; |
| 108 | |
| 109 | #ifdef CONFIG_LIBAIO |
| 110 | io_context_t aio_ctx; |
| 111 | #endif |
| 112 | |
| 113 | int numa_node; |
| 114 | int per_file_depth; |
| 115 | const char *filename; |
| 116 | |
| 117 | struct file files[MAX_FDS]; |
| 118 | unsigned nr_files; |
| 119 | unsigned cur_file; |
| 120 | struct iovec iovecs[]; |
| 121 | }; |
| 122 | |
| 123 | static struct submitter *submitter; |
| 124 | static volatile int finish; |
| 125 | static int stats_running; |
| 126 | static unsigned long max_iops; |
| 127 | static long t_io_uring_page_size; |
| 128 | |
| 129 | static int depth = DEPTH; |
| 130 | static int batch_submit = BATCH_SUBMIT; |
| 131 | static int batch_complete = BATCH_COMPLETE; |
| 132 | static int bs = BS; |
| 133 | static int polled = 1; /* use IO polling */ |
| 134 | static int fixedbufs = 1; /* use fixed user buffers */ |
| 135 | static int register_files = 1; /* use fixed files */ |
| 136 | static int buffered = 0; /* use buffered IO, not O_DIRECT */ |
| 137 | static int sq_thread_poll = 0; /* use kernel submission/poller thread */ |
| 138 | static int sq_thread_cpu = -1; /* pin above thread to this CPU */ |
| 139 | static int do_nop = 0; /* no-op SQ ring commands */ |
| 140 | static int nthreads = 1; |
| 141 | static int stats = 0; /* generate IO stats */ |
| 142 | static int aio = 0; /* use libaio */ |
static int runtime = 0;	/* runtime in seconds, 0 == run until interrupted */
| 144 | static int random_io = 1; /* random or sequential IO */ |
| 145 | static int register_ring = 1; /* register ring */ |
| 146 | static int use_sync = 0; /* use preadv2 */ |
| 147 | static int numa_placement = 0; /* set to node of device */ |
| 148 | static int pt = 0; /* passthrough I/O or not */ |
| 149 | |
| 150 | static unsigned long tsc_rate; |
| 151 | |
| 152 | #define TSC_RATE_FILE "tsc-rate" |
| 153 | |
| 154 | static int vectored = 1; |
| 155 | |
| 156 | static float plist[] = { 1.0, 5.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, |
| 157 | 80.0, 90.0, 95.0, 99.0, 99.5, 99.9, 99.95, 99.99 }; |
static int plist_len = sizeof(plist) / sizeof(plist[0]);
| 159 | |
| 160 | static int nvme_identify(int fd, __u32 nsid, enum nvme_identify_cns cns, |
| 161 | enum nvme_csi csi, void *data) |
| 162 | { |
| 163 | struct nvme_passthru_cmd cmd = { |
| 164 | .opcode = nvme_admin_identify, |
| 165 | .nsid = nsid, |
| 166 | .addr = (__u64)(uintptr_t)data, |
| 167 | .data_len = NVME_IDENTIFY_DATA_SIZE, |
| 168 | .cdw10 = cns, |
| 169 | .cdw11 = csi << NVME_IDENTIFY_CSI_SHIFT, |
| 170 | .timeout_ms = NVME_DEFAULT_IOCTL_TIMEOUT, |
| 171 | }; |
| 172 | |
| 173 | return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd); |
| 174 | } |
| 175 | |
| 176 | static int nvme_get_info(int fd, __u32 *nsid, __u32 *lba_sz, __u64 *nlba) |
| 177 | { |
| 178 | struct nvme_id_ns ns; |
| 179 | int namespace_id; |
| 180 | int err; |
| 181 | |
| 182 | namespace_id = ioctl(fd, NVME_IOCTL_ID); |
| 183 | if (namespace_id < 0) { |
| 184 | fprintf(stderr, "error failed to fetch namespace-id\n"); |
| 185 | close(fd); |
| 186 | return -errno; |
| 187 | } |
| 188 | |
| 189 | /* |
| 190 | * Identify namespace to get namespace-id, namespace size in LBA's |
| 191 | * and LBA data size. |
| 192 | */ |
| 193 | err = nvme_identify(fd, namespace_id, NVME_IDENTIFY_CNS_NS, |
| 194 | NVME_CSI_NVM, &ns); |
| 195 | if (err) { |
| 196 | fprintf(stderr, "error failed to fetch identify namespace\n"); |
| 197 | close(fd); |
| 198 | return err; |
| 199 | } |
| 200 | |
| 201 | *nsid = namespace_id; |
| 202 | *lba_sz = 1 << ns.lbaf[(ns.flbas & 0x0f)].ds; |
| 203 | *nlba = ns.nsze; |
| 204 | |
| 205 | return 0; |
| 206 | } |
| 207 | |
| 208 | static unsigned long cycles_to_nsec(unsigned long cycles) |
| 209 | { |
| 210 | uint64_t val; |
| 211 | |
| 212 | if (!tsc_rate) |
| 213 | return cycles; |
| 214 | |
| 215 | val = cycles * 1000000000ULL; |
| 216 | return val / tsc_rate; |
| 217 | } |
| 218 | |
| 219 | static unsigned long plat_idx_to_val(unsigned int idx) |
| 220 | { |
| 221 | unsigned int error_bits; |
| 222 | unsigned long k, base; |
| 223 | |
| 224 | assert(idx < PLAT_NR); |
| 225 | |
	/*
	 * MSB <= PLAT_BITS, cannot be rounded off. Use
	 * all bits of the sample as the index.
	 */
| 228 | if (idx < (PLAT_VAL << 1)) |
| 229 | return cycles_to_nsec(idx); |
| 230 | |
| 231 | /* Find the group and compute the minimum value of that group */ |
| 232 | error_bits = (idx >> PLAT_BITS) - 1; |
| 233 | base = ((unsigned long) 1) << (error_bits + PLAT_BITS); |
| 234 | |
	/* Find the bucket number within the group */
| 236 | k = idx % PLAT_VAL; |
| 237 | |
| 238 | /* Return the mean of the range of the bucket */ |
| 239 | return cycles_to_nsec(base + ((k + 0.5) * (1 << error_bits))); |
| 240 | } |
| 241 | |
| 242 | unsigned int calculate_clat_percentiles(unsigned long *io_u_plat, |
| 243 | unsigned long nr, unsigned long **output, |
| 244 | unsigned long *maxv, unsigned long *minv) |
| 245 | { |
| 246 | unsigned long sum = 0; |
| 247 | unsigned int len = plist_len, i, j = 0; |
| 248 | unsigned long *ovals = NULL; |
| 249 | bool is_last; |
| 250 | |
| 251 | *minv = -1UL; |
| 252 | *maxv = 0; |
| 253 | |
| 254 | ovals = malloc(len * sizeof(*ovals)); |
| 255 | if (!ovals) |
| 256 | return 0; |
| 257 | |
| 258 | /* |
| 259 | * Calculate bucket values, note down max and min values |
| 260 | */ |
| 261 | is_last = false; |
| 262 | for (i = 0; i < PLAT_NR && !is_last; i++) { |
| 263 | sum += io_u_plat[i]; |
| 264 | while (sum >= ((long double) plist[j] / 100.0 * nr)) { |
| 265 | assert(plist[j] <= 100.0); |
| 266 | |
| 267 | ovals[j] = plat_idx_to_val(i); |
| 268 | if (ovals[j] < *minv) |
| 269 | *minv = ovals[j]; |
| 270 | if (ovals[j] > *maxv) |
| 271 | *maxv = ovals[j]; |
| 272 | |
			is_last = (j == len - 1);
| 274 | if (is_last) |
| 275 | break; |
| 276 | |
| 277 | j++; |
| 278 | } |
| 279 | } |
| 280 | |
| 281 | if (!is_last) |
| 282 | fprintf(stderr, "error calculating latency percentiles\n"); |
| 283 | |
| 284 | *output = ovals; |
| 285 | return len; |
| 286 | } |
| 287 | |
| 288 | static void show_clat_percentiles(unsigned long *io_u_plat, unsigned long nr, |
| 289 | unsigned int precision) |
| 290 | { |
| 291 | unsigned int divisor, len, i, j = 0; |
| 292 | unsigned long minv, maxv; |
| 293 | unsigned long *ovals; |
| 294 | int per_line, scale_down, time_width; |
| 295 | bool is_last; |
| 296 | char fmt[32]; |
| 297 | |
| 298 | len = calculate_clat_percentiles(io_u_plat, nr, &ovals, &maxv, &minv); |
| 299 | if (!len || !ovals) |
| 300 | goto out; |
| 301 | |
| 302 | if (!tsc_rate) { |
| 303 | scale_down = 0; |
| 304 | divisor = 1; |
| 305 | printf(" percentiles (tsc ticks):\n |"); |
| 306 | } else if (minv > 2000 && maxv > 99999) { |
| 307 | scale_down = 1; |
| 308 | divisor = 1000; |
| 309 | printf(" percentiles (usec):\n |"); |
| 310 | } else { |
| 311 | scale_down = 0; |
| 312 | divisor = 1; |
| 313 | printf(" percentiles (nsec):\n |"); |
| 314 | } |
| 315 | |
| 316 | time_width = max(5, (int) (log10(maxv / divisor) + 1)); |
| 317 | snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3, |
| 318 | precision, time_width); |
| 319 | /* fmt will be something like " %5.2fth=[%4llu]%c" */ |
| 320 | per_line = (80 - 7) / (precision + 10 + time_width); |
| 321 | |
| 322 | for (j = 0; j < len; j++) { |
| 323 | /* for formatting */ |
| 324 | if (j != 0 && (j % per_line) == 0) |
| 325 | printf(" |"); |
| 326 | |
| 327 | /* end of the list */ |
		is_last = (j == len - 1);
| 329 | |
| 330 | for (i = 0; i < scale_down; i++) |
| 331 | ovals[j] = (ovals[j] + 999) / 1000; |
| 332 | |
| 333 | printf(fmt, plist[j], ovals[j], is_last ? '\n' : ','); |
| 334 | |
| 335 | if (is_last) |
| 336 | break; |
| 337 | |
| 338 | if ((j % per_line) == per_line - 1) /* for formatting */ |
| 339 | printf("\n"); |
| 340 | } |
| 341 | |
| 342 | out: |
| 343 | free(ovals); |
| 344 | } |
| 345 | |
| 346 | #ifdef ARCH_HAVE_CPU_CLOCK |
| 347 | static unsigned int plat_val_to_idx(unsigned long val) |
| 348 | { |
| 349 | unsigned int msb, error_bits, base, offset, idx; |
| 350 | |
| 351 | /* Find MSB starting from bit 0 */ |
| 352 | if (val == 0) |
| 353 | msb = 0; |
| 354 | else |
| 355 | msb = (sizeof(val)*8) - __builtin_clzll(val) - 1; |
| 356 | |
| 357 | /* |
	 * MSB <= PLAT_BITS, cannot be rounded off. Use
	 * all bits of the sample as the index.
| 360 | */ |
| 361 | if (msb <= PLAT_BITS) |
| 362 | return val; |
| 363 | |
	/* Compute the number of error bits to discard */
| 365 | error_bits = msb - PLAT_BITS; |
| 366 | |
| 367 | /* Compute the number of buckets before the group */ |
| 368 | base = (error_bits + 1) << PLAT_BITS; |
| 369 | |
| 370 | /* |
| 371 | * Discard the error bits and apply the mask to find the |
| 372 | * index for the buckets in the group |
| 373 | */ |
| 374 | offset = (PLAT_VAL - 1) & (val >> error_bits); |
| 375 | |
| 376 | /* Make sure the index does not exceed (array size - 1) */ |
| 377 | idx = (base + offset) < (PLAT_NR - 1) ? |
| 378 | (base + offset) : (PLAT_NR - 1); |
| 379 | |
| 380 | return idx; |
| 381 | } |
| 382 | #endif |
| 383 | |
| 384 | static void add_stat(struct submitter *s, int clock_index, int nr) |
| 385 | { |
| 386 | #ifdef ARCH_HAVE_CPU_CLOCK |
| 387 | unsigned long cycles; |
| 388 | unsigned int pidx; |
| 389 | |
	/* a zero clock index marks an IO submitted without a timestamp */
	if (!s->finish && clock_index) {
| 391 | cycles = get_cpu_clock(); |
| 392 | cycles -= s->clock_batch[clock_index]; |
| 393 | pidx = plat_val_to_idx(cycles); |
| 394 | s->plat[pidx] += nr; |
| 395 | } |
| 396 | #endif |
| 397 | } |
| 398 | |
| 399 | static int io_uring_register_buffers(struct submitter *s) |
| 400 | { |
| 401 | if (do_nop) |
| 402 | return 0; |
| 403 | |
| 404 | return syscall(__NR_io_uring_register, s->ring_fd, |
| 405 | IORING_REGISTER_BUFFERS, s->iovecs, roundup_pow2(depth)); |
| 406 | } |
| 407 | |
| 408 | static int io_uring_register_files(struct submitter *s) |
| 409 | { |
| 410 | int i; |
| 411 | |
| 412 | if (do_nop) |
| 413 | return 0; |
| 414 | |
	s->fds = calloc(s->nr_files, sizeof(__s32));
	if (!s->fds)
		return -1;
| 416 | for (i = 0; i < s->nr_files; i++) { |
| 417 | s->fds[i] = s->files[i].real_fd; |
| 418 | s->files[i].fixed_fd = i; |
| 419 | } |
| 420 | |
| 421 | return syscall(__NR_io_uring_register, s->ring_fd, |
| 422 | IORING_REGISTER_FILES, s->fds, s->nr_files); |
| 423 | } |
| 424 | |
| 425 | static int io_uring_setup(unsigned entries, struct io_uring_params *p) |
| 426 | { |
| 427 | int ret; |
| 428 | |
| 429 | /* |
| 430 | * Clamp CQ ring size at our SQ ring size, we don't need more entries |
| 431 | * than that. |
| 432 | */ |
| 433 | p->flags |= IORING_SETUP_CQSIZE; |
| 434 | p->cq_entries = entries; |
| 435 | |
| 436 | p->flags |= IORING_SETUP_COOP_TASKRUN; |
| 437 | p->flags |= IORING_SETUP_SINGLE_ISSUER; |
| 438 | p->flags |= IORING_SETUP_DEFER_TASKRUN; |
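	/*
	 * Older kernels reject setup flags they do not know with EINVAL, so
	 * strip the optional flags above one at a time below until setup
	 * succeeds.
	 */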
| 439 | retry: |
| 440 | ret = syscall(__NR_io_uring_setup, entries, p); |
| 441 | if (!ret) |
| 442 | return 0; |
| 443 | |
| 444 | if (errno == EINVAL && p->flags & IORING_SETUP_COOP_TASKRUN) { |
| 445 | p->flags &= ~IORING_SETUP_COOP_TASKRUN; |
| 446 | goto retry; |
| 447 | } |
| 448 | if (errno == EINVAL && p->flags & IORING_SETUP_SINGLE_ISSUER) { |
| 449 | p->flags &= ~IORING_SETUP_SINGLE_ISSUER; |
| 450 | goto retry; |
| 451 | } |
| 452 | if (errno == EINVAL && p->flags & IORING_SETUP_DEFER_TASKRUN) { |
| 453 | p->flags &= ~IORING_SETUP_DEFER_TASKRUN; |
| 454 | goto retry; |
| 455 | } |
| 456 | |
| 457 | return ret; |
| 458 | } |
| 459 | |
| 460 | static void io_uring_probe(int fd) |
| 461 | { |
| 462 | struct io_uring_probe *p; |
| 463 | int ret; |
| 464 | |
| 465 | p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op)); |
| 466 | if (!p) |
| 467 | return; |
| 468 | |
| 469 | ret = syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, p, 256); |
| 470 | if (ret < 0) |
| 471 | goto out; |
| 472 | |
| 473 | if (IORING_OP_READ > p->ops_len) |
| 474 | goto out; |
| 475 | |
| 476 | if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED)) |
| 477 | vectored = 0; |
| 478 | out: |
| 479 | free(p); |
| 480 | } |
| 481 | |
| 482 | static int io_uring_enter(struct submitter *s, unsigned int to_submit, |
| 483 | unsigned int min_complete, unsigned int flags) |
| 484 | { |
| 485 | if (register_ring) |
| 486 | flags |= IORING_ENTER_REGISTERED_RING; |
| 487 | #ifdef FIO_ARCH_HAS_SYSCALL |
| 488 | return __do_syscall6(__NR_io_uring_enter, s->enter_ring_fd, to_submit, |
| 489 | min_complete, flags, NULL, 0); |
| 490 | #else |
| 491 | return syscall(__NR_io_uring_enter, s->enter_ring_fd, to_submit, |
| 492 | min_complete, flags, NULL, 0); |
| 493 | #endif |
| 494 | } |
| 495 | |
| 496 | static unsigned long long get_offset(struct submitter *s, struct file *f) |
| 497 | { |
| 498 | unsigned long long offset; |
| 499 | long r; |
| 500 | |
| 501 | if (random_io) { |
| 502 | unsigned long long block; |
| 503 | |
| 504 | r = __rand64(&s->rand_state); |
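		/*
		 * Plain modulo is slightly biased towards low block numbers,
		 * but with 64-bit randomness the skew is negligible for any
		 * realistic device size.
		 */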
| 505 | block = r % f->max_blocks; |
| 506 | offset = block * (unsigned long long) bs; |
| 507 | } else { |
| 508 | offset = f->cur_off; |
| 509 | f->cur_off += bs; |
| 510 | if (f->cur_off + bs > f->max_size) |
| 511 | f->cur_off = 0; |
| 512 | } |
| 513 | |
| 514 | return offset; |
| 515 | } |
| 516 | |
| 517 | static struct file *get_next_file(struct submitter *s) |
| 518 | { |
| 519 | struct file *f; |
| 520 | |
| 521 | if (s->nr_files == 1) { |
| 522 | f = &s->files[0]; |
| 523 | } else { |
| 524 | f = &s->files[s->cur_file]; |
| 525 | if (f->pending_ios >= s->per_file_depth) { |
| 526 | s->cur_file++; |
| 527 | if (s->cur_file == s->nr_files) |
| 528 | s->cur_file = 0; |
| 529 | f = &s->files[s->cur_file]; |
| 530 | } |
| 531 | } |
| 532 | |
| 533 | f->pending_ios++; |
| 534 | return f; |
| 535 | } |
| 536 | |
| 537 | static void init_io(struct submitter *s, unsigned index) |
| 538 | { |
| 539 | struct io_uring_sqe *sqe = &s->sqes[index]; |
| 540 | struct file *f; |
| 541 | |
| 542 | if (do_nop) { |
| 543 | sqe->opcode = IORING_OP_NOP; |
| 544 | return; |
| 545 | } |
| 546 | |
| 547 | f = get_next_file(s); |
| 548 | |
| 549 | if (register_files) { |
| 550 | sqe->flags = IOSQE_FIXED_FILE; |
| 551 | sqe->fd = f->fixed_fd; |
| 552 | } else { |
| 553 | sqe->flags = 0; |
| 554 | sqe->fd = f->real_fd; |
| 555 | } |
| 556 | if (fixedbufs) { |
| 557 | sqe->opcode = IORING_OP_READ_FIXED; |
| 558 | sqe->addr = (unsigned long) s->iovecs[index].iov_base; |
| 559 | sqe->len = bs; |
| 560 | sqe->buf_index = index; |
| 561 | } else if (!vectored) { |
| 562 | sqe->opcode = IORING_OP_READ; |
| 563 | sqe->addr = (unsigned long) s->iovecs[index].iov_base; |
| 564 | sqe->len = bs; |
| 565 | sqe->buf_index = 0; |
| 566 | } else { |
| 567 | sqe->opcode = IORING_OP_READV; |
| 568 | sqe->addr = (unsigned long) &s->iovecs[index]; |
| 569 | sqe->len = 1; |
| 570 | sqe->buf_index = 0; |
| 571 | } |
| 572 | sqe->ioprio = 0; |
| 573 | sqe->off = get_offset(s, f); |
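	/*
	 * user_data carries the file index in the low 32 bits and, when
	 * stats are running, the submit-time clock batch index in the high
	 * 32 bits; reap_events_uring() unpacks both.
	 */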
| 574 | sqe->user_data = (unsigned long) f->fileno; |
| 575 | if (stats && stats_running) |
| 576 | sqe->user_data |= ((uint64_t)s->clock_index << 32); |
| 577 | } |
| 578 | |
| 579 | static void init_io_pt(struct submitter *s, unsigned index) |
| 580 | { |
| 581 | struct io_uring_sqe *sqe = &s->sqes[index << 1]; |
| 582 | unsigned long offset; |
| 583 | struct file *f; |
| 584 | struct nvme_uring_cmd *cmd; |
| 585 | unsigned long long slba; |
| 586 | unsigned long long nlb; |
| 587 | |
| 588 | f = get_next_file(s); |
| 589 | |
| 590 | offset = get_offset(s, f); |
| 591 | |
| 592 | if (register_files) { |
| 593 | sqe->fd = f->fixed_fd; |
| 594 | sqe->flags = IOSQE_FIXED_FILE; |
| 595 | } else { |
| 596 | sqe->fd = f->real_fd; |
| 597 | sqe->flags = 0; |
| 598 | } |
| 599 | sqe->opcode = IORING_OP_URING_CMD; |
| 600 | sqe->user_data = (unsigned long) f->fileno; |
| 601 | if (stats) |
| 602 | sqe->user_data |= ((__u64) s->clock_index << 32ULL); |
| 603 | sqe->cmd_op = NVME_URING_CMD_IO; |
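	/*
	 * Convert the byte offset into LBA terms. NVMe takes a 0-based LBA
	 * count: e.g. with 512-byte LBAs (lba_shift == 9) and bs == 4096,
	 * one command covers 8 LBAs and nlb below is 7.
	 */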
| 604 | slba = offset >> f->lba_shift; |
| 605 | nlb = (bs >> f->lba_shift) - 1; |
| 606 | cmd = (struct nvme_uring_cmd *)&sqe->cmd; |
	/* cdw10 and cdw11 hold the lower and upper 32 bits of the starting LBA */
| 608 | cmd->cdw10 = slba & 0xffffffff; |
| 609 | cmd->cdw11 = slba >> 32; |
	/* cdw12 holds the 0-based number of LBAs to read */
| 611 | cmd->cdw12 = nlb; |
| 612 | cmd->addr = (unsigned long) s->iovecs[index].iov_base; |
| 613 | cmd->data_len = bs; |
| 614 | if (fixedbufs) { |
| 615 | sqe->uring_cmd_flags = IORING_URING_CMD_FIXED; |
| 616 | sqe->buf_index = index; |
| 617 | } |
| 618 | cmd->nsid = f->nsid; |
	cmd->opcode = 2; /* nvme_cmd_read */
| 620 | } |
| 621 | |
| 622 | static int prep_more_ios_uring(struct submitter *s, int max_ios) |
| 623 | { |
| 624 | struct io_sq_ring *ring = &s->sq_ring; |
| 625 | unsigned head, index, tail, next_tail, prepped = 0; |
| 626 | |
| 627 | if (sq_thread_poll) |
| 628 | head = atomic_load_acquire(ring->head); |
| 629 | else |
| 630 | head = *ring->head; |
| 631 | |
| 632 | next_tail = tail = *ring->tail; |
| 633 | do { |
| 634 | next_tail++; |
| 635 | if (next_tail == head) |
| 636 | break; |
| 637 | |
| 638 | index = tail & sq_ring_mask; |
| 639 | if (pt) |
| 640 | init_io_pt(s, index); |
| 641 | else |
| 642 | init_io(s, index); |
| 643 | prepped++; |
| 644 | tail = next_tail; |
| 645 | } while (prepped < max_ios); |
| 646 | |
	/*
	 * Publish the new tail with a release store so the kernel observes
	 * fully written SQEs before it sees the tail advance.
	 */
	if (prepped)
		atomic_store_release(ring->tail, tail);
| 649 | return prepped; |
| 650 | } |
| 651 | |
| 652 | static int get_file_size(struct file *f) |
| 653 | { |
| 654 | struct stat st; |
| 655 | |
| 656 | if (fstat(f->real_fd, &st) < 0) |
| 657 | return -1; |
| 658 | if (pt) { |
| 659 | __u64 nlba; |
| 660 | __u32 lbs; |
| 661 | int ret; |
| 662 | |
| 663 | if (!S_ISCHR(st.st_mode)) { |
| 664 | fprintf(stderr, "passthrough works with only nvme-ns " |
| 665 | "generic devices (/dev/ngXnY)\n"); |
| 666 | return -1; |
| 667 | } |
| 668 | ret = nvme_get_info(f->real_fd, &f->nsid, &lbs, &nlba); |
| 669 | if (ret) |
| 670 | return -1; |
| 671 | if ((bs % lbs) != 0) { |
| 672 | printf("error: bs:%d should be a multiple logical_block_size:%d\n", |
| 673 | bs, lbs); |
| 674 | return -1; |
| 675 | } |
		/* the offset generator works in units of bs, not LBAs */
		f->max_blocks = nlba / (bs / lbs);
		f->max_size = nlba * lbs;
| 678 | f->lba_shift = ilog2(lbs); |
| 679 | return 0; |
| 680 | } else if (S_ISBLK(st.st_mode)) { |
| 681 | unsigned long long bytes; |
| 682 | |
| 683 | if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0) |
| 684 | return -1; |
| 685 | |
| 686 | f->max_blocks = bytes / bs; |
| 687 | f->max_size = bytes; |
| 688 | return 0; |
| 689 | } else if (S_ISREG(st.st_mode)) { |
| 690 | f->max_blocks = st.st_size / bs; |
| 691 | f->max_size = st.st_size; |
| 692 | return 0; |
| 693 | } |
| 694 | |
| 695 | return -1; |
| 696 | } |
| 697 | |
| 698 | static int reap_events_uring(struct submitter *s) |
| 699 | { |
| 700 | struct io_cq_ring *ring = &s->cq_ring; |
| 701 | struct io_uring_cqe *cqe; |
| 702 | unsigned head, reaped = 0; |
| 703 | int last_idx = -1, stat_nr = 0; |
| 704 | |
	/*
	 * Only we advance the CQ head, so a plain load suffices; the tail is
	 * written by the kernel and is loaded with acquire semantics below
	 * before any CQE contents are read.
	 */
	head = *ring->head;
| 706 | do { |
| 707 | struct file *f; |
| 708 | |
| 709 | if (head == atomic_load_acquire(ring->tail)) |
| 710 | break; |
| 711 | cqe = &ring->cqes[head & cq_ring_mask]; |
| 712 | if (!do_nop) { |
| 713 | int fileno = cqe->user_data & 0xffffffff; |
| 714 | |
| 715 | f = &s->files[fileno]; |
| 716 | f->pending_ios--; |
| 717 | if (cqe->res != bs) { |
| 718 | if (cqe->res == -ENODATA || cqe->res == -EIO) { |
| 719 | s->io_errors++; |
| 720 | } else { |
| 721 | printf("io: unexpected ret=%d\n", cqe->res); |
| 722 | if (polled && cqe->res == -EOPNOTSUPP) |
| 723 | printf("Your filesystem/driver/kernel doesn't support polled IO\n"); |
| 724 | return -1; |
| 725 | } |
| 726 | } |
| 727 | } |
| 728 | if (stats) { |
| 729 | int clock_index = cqe->user_data >> 32; |
| 730 | |
| 731 | if (last_idx != clock_index) { |
| 732 | if (last_idx != -1) { |
| 733 | add_stat(s, last_idx, stat_nr); |
| 734 | stat_nr = 0; |
| 735 | } |
| 736 | last_idx = clock_index; |
| 737 | } |
| 738 | stat_nr++; |
| 739 | } |
| 740 | reaped++; |
| 741 | head++; |
| 742 | } while (1); |
| 743 | |
| 744 | if (stat_nr) |
| 745 | add_stat(s, last_idx, stat_nr); |
| 746 | |
| 747 | if (reaped) { |
| 748 | s->inflight -= reaped; |
| 749 | atomic_store_release(ring->head, head); |
| 750 | } |
| 751 | return reaped; |
| 752 | } |
| 753 | |
| 754 | static int reap_events_uring_pt(struct submitter *s) |
| 755 | { |
| 756 | struct io_cq_ring *ring = &s->cq_ring; |
| 757 | struct io_uring_cqe *cqe; |
| 758 | unsigned head, reaped = 0; |
| 759 | int last_idx = -1, stat_nr = 0; |
| 760 | unsigned index; |
| 761 | int fileno; |
| 762 | |
| 763 | head = *ring->head; |
| 764 | do { |
| 765 | struct file *f; |
| 766 | |
| 767 | if (head == atomic_load_acquire(ring->tail)) |
| 768 | break; |
| 769 | index = head & cq_ring_mask; |
| 770 | cqe = &ring->cqes[index << 1]; |
| 771 | fileno = cqe->user_data & 0xffffffff; |
| 772 | f = &s->files[fileno]; |
| 773 | f->pending_ios--; |
| 774 | |
| 775 | if (cqe->res != 0) { |
| 776 | printf("io: unexpected ret=%d\n", cqe->res); |
| 777 | if (polled && cqe->res == -EINVAL) |
| 778 | printf("passthrough doesn't support polled IO\n"); |
| 779 | return -1; |
| 780 | } |
| 781 | if (stats) { |
| 782 | int clock_index = cqe->user_data >> 32; |
| 783 | |
| 784 | if (last_idx != clock_index) { |
| 785 | if (last_idx != -1) { |
| 786 | add_stat(s, last_idx, stat_nr); |
| 787 | stat_nr = 0; |
| 788 | } |
| 789 | last_idx = clock_index; |
| 790 | } |
| 791 | stat_nr++; |
| 792 | } |
| 793 | reaped++; |
| 794 | head++; |
| 795 | } while (1); |
| 796 | |
| 797 | if (stat_nr) |
| 798 | add_stat(s, last_idx, stat_nr); |
| 799 | |
| 800 | if (reaped) { |
| 801 | s->inflight -= reaped; |
| 802 | atomic_store_release(ring->head, head); |
| 803 | } |
| 804 | return reaped; |
| 805 | } |
| 806 | |
| 807 | static void set_affinity(struct submitter *s) |
| 808 | { |
| 809 | #ifdef CONFIG_LIBNUMA |
| 810 | struct bitmask *mask; |
| 811 | |
| 812 | if (s->numa_node == -1) |
| 813 | return; |
| 814 | |
| 815 | numa_set_preferred(s->numa_node); |
| 816 | |
| 817 | mask = numa_allocate_cpumask(); |
| 818 | numa_node_to_cpus(s->numa_node, mask); |
| 819 | numa_sched_setaffinity(s->tid, mask); |
| 820 | #endif |
| 821 | } |
| 822 | |
| 823 | static int detect_node(struct submitter *s, char *name) |
| 824 | { |
| 825 | #ifdef CONFIG_LIBNUMA |
| 826 | const char *base = basename(name); |
| 827 | char str[128]; |
| 828 | int ret, fd, node; |
| 829 | |
| 830 | if (pt) |
| 831 | sprintf(str, "/sys/class/nvme-generic/%s/device/numa_node", base); |
| 832 | else |
| 833 | sprintf(str, "/sys/block/%s/device/numa_node", base); |
| 834 | fd = open(str, O_RDONLY); |
| 835 | if (fd < 0) |
| 836 | return -1; |
| 837 | |
| 838 | ret = read(fd, str, sizeof(str)); |
| 839 | if (ret < 0) { |
| 840 | close(fd); |
| 841 | return -1; |
| 842 | } |
| 843 | node = atoi(str); |
| 844 | s->numa_node = node; |
| 845 | close(fd); |
| 846 | #else |
| 847 | s->numa_node = -1; |
| 848 | #endif |
| 849 | return 0; |
| 850 | } |
| 851 | |
| 852 | static int setup_aio(struct submitter *s) |
| 853 | { |
| 854 | #ifdef CONFIG_LIBAIO |
| 855 | if (polled) { |
| 856 | fprintf(stderr, "aio does not support polled IO\n"); |
| 857 | polled = 0; |
| 858 | } |
| 859 | if (sq_thread_poll) { |
| 860 | fprintf(stderr, "aio does not support SQPOLL IO\n"); |
| 861 | sq_thread_poll = 0; |
| 862 | } |
| 863 | if (do_nop) { |
| 864 | fprintf(stderr, "aio does not support polled IO\n"); |
| 865 | do_nop = 0; |
| 866 | } |
| 867 | if (fixedbufs || register_files) { |
| 868 | fprintf(stderr, "aio does not support registered files or buffers\n"); |
| 869 | fixedbufs = register_files = 0; |
| 870 | } |
| 871 | |
| 872 | s->per_file_depth = (depth + s->nr_files - 1) / s->nr_files; |
| 873 | return io_queue_init(roundup_pow2(depth), &s->aio_ctx); |
| 874 | #else |
| 875 | fprintf(stderr, "Legacy AIO not available on this system/build\n"); |
| 876 | errno = EINVAL; |
| 877 | return -1; |
| 878 | #endif |
| 879 | } |
| 880 | |
| 881 | static int setup_ring(struct submitter *s) |
| 882 | { |
| 883 | struct io_sq_ring *sring = &s->sq_ring; |
| 884 | struct io_cq_ring *cring = &s->cq_ring; |
| 885 | struct io_uring_params p; |
| 886 | int ret, fd, i; |
| 887 | void *ptr; |
| 888 | size_t len; |
| 889 | |
| 890 | memset(&p, 0, sizeof(p)); |
| 891 | |
| 892 | if (polled && !do_nop) |
| 893 | p.flags |= IORING_SETUP_IOPOLL; |
| 894 | if (sq_thread_poll) { |
| 895 | p.flags |= IORING_SETUP_SQPOLL; |
| 896 | if (sq_thread_cpu != -1) { |
| 897 | p.flags |= IORING_SETUP_SQ_AFF; |
| 898 | p.sq_thread_cpu = sq_thread_cpu; |
| 899 | } |
| 900 | } |
| 901 | if (pt) { |
| 902 | p.flags |= IORING_SETUP_SQE128; |
| 903 | p.flags |= IORING_SETUP_CQE32; |
| 904 | } |
| 905 | |
| 906 | fd = io_uring_setup(depth, &p); |
| 907 | if (fd < 0) { |
| 908 | perror("io_uring_setup"); |
| 909 | return 1; |
| 910 | } |
| 911 | s->ring_fd = s->enter_ring_fd = fd; |
| 912 | |
| 913 | io_uring_probe(fd); |
| 914 | |
| 915 | if (fixedbufs) { |
| 916 | struct rlimit rlim; |
| 917 | |
| 918 | rlim.rlim_cur = RLIM_INFINITY; |
| 919 | rlim.rlim_max = RLIM_INFINITY; |
| 920 | /* ignore potential error, not needed on newer kernels */ |
| 921 | setrlimit(RLIMIT_MEMLOCK, &rlim); |
| 922 | |
| 923 | ret = io_uring_register_buffers(s); |
| 924 | if (ret < 0) { |
| 925 | perror("io_uring_register_buffers"); |
| 926 | return 1; |
| 927 | } |
| 928 | } |
| 929 | |
| 930 | if (register_files) { |
| 931 | ret = io_uring_register_files(s); |
| 932 | if (ret < 0) { |
| 933 | perror("io_uring_register_files"); |
| 934 | return 1; |
| 935 | } |
| 936 | } |
| 937 | |
	/*
	 * Map the three ring regions: the SQ ring (head/tail/flags plus the
	 * SQE index array), the SQE array itself, and the CQ ring holding
	 * the CQEs. SQE128/CQE32 double the respective entry sizes for
	 * passthrough.
	 */
	ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
| 939 | PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, |
| 940 | IORING_OFF_SQ_RING); |
| 941 | sring->head = ptr + p.sq_off.head; |
| 942 | sring->tail = ptr + p.sq_off.tail; |
| 943 | sring->ring_mask = ptr + p.sq_off.ring_mask; |
| 944 | sring->ring_entries = ptr + p.sq_off.ring_entries; |
| 945 | sring->flags = ptr + p.sq_off.flags; |
| 946 | sring->array = ptr + p.sq_off.array; |
| 947 | sq_ring_mask = *sring->ring_mask; |
| 948 | |
| 949 | if (p.flags & IORING_SETUP_SQE128) |
| 950 | len = 2 * p.sq_entries * sizeof(struct io_uring_sqe); |
| 951 | else |
| 952 | len = p.sq_entries * sizeof(struct io_uring_sqe); |
| 953 | s->sqes = mmap(0, len, |
| 954 | PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, |
| 955 | IORING_OFF_SQES); |
| 956 | |
| 957 | if (p.flags & IORING_SETUP_CQE32) { |
| 958 | len = p.cq_off.cqes + |
| 959 | 2 * p.cq_entries * sizeof(struct io_uring_cqe); |
| 960 | } else { |
| 961 | len = p.cq_off.cqes + |
| 962 | p.cq_entries * sizeof(struct io_uring_cqe); |
| 963 | } |
| 964 | ptr = mmap(0, len, |
| 965 | PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, |
| 966 | IORING_OFF_CQ_RING); |
| 967 | cring->head = ptr + p.cq_off.head; |
| 968 | cring->tail = ptr + p.cq_off.tail; |
| 969 | cring->ring_mask = ptr + p.cq_off.ring_mask; |
| 970 | cring->ring_entries = ptr + p.cq_off.ring_entries; |
| 971 | cring->cqes = ptr + p.cq_off.cqes; |
| 972 | cq_ring_mask = *cring->ring_mask; |
| 973 | |
| 974 | for (i = 0; i < p.sq_entries; i++) |
| 975 | sring->array[i] = i; |
| 976 | |
| 977 | s->per_file_depth = INT_MAX; |
| 978 | if (s->nr_files) |
| 979 | s->per_file_depth = (depth + s->nr_files - 1) / s->nr_files; |
| 980 | return 0; |
| 981 | } |
| 982 | |
| 983 | static void *allocate_mem(struct submitter *s, int size) |
| 984 | { |
| 985 | void *buf; |
| 986 | |
| 987 | #ifdef CONFIG_LIBNUMA |
| 988 | if (s->numa_node != -1) |
| 989 | return numa_alloc_onnode(size, s->numa_node); |
| 990 | #endif |
| 991 | |
	if (posix_memalign(&buf, t_io_uring_page_size, size)) {
| 993 | printf("failed alloc\n"); |
| 994 | return NULL; |
| 995 | } |
| 996 | |
| 997 | return buf; |
| 998 | } |
| 999 | |
| 1000 | static int submitter_init(struct submitter *s) |
| 1001 | { |
| 1002 | int i, nr_batch, err; |
| 1003 | static int init_printed; |
| 1004 | char buf[80]; |
| 1005 | s->tid = gettid(); |
| 1006 | printf("submitter=%d, tid=%d, file=%s, nfiles=%d, node=%d\n", s->index, s->tid, |
| 1007 | s->filename, s->nr_files, s->numa_node); |
| 1008 | |
| 1009 | set_affinity(s); |
| 1010 | |
| 1011 | __init_rand64(&s->rand_state, s->tid); |
| 1012 | srand48(s->tid); |
| 1013 | |
| 1014 | for (i = 0; i < MAX_FDS; i++) |
| 1015 | s->files[i].fileno = i; |
| 1016 | |
| 1017 | for (i = 0; i < roundup_pow2(depth); i++) { |
| 1018 | void *buf; |
| 1019 | |
| 1020 | buf = allocate_mem(s, bs); |
| 1021 | if (!buf) |
| 1022 | return -1; |
| 1023 | s->iovecs[i].iov_base = buf; |
| 1024 | s->iovecs[i].iov_len = bs; |
| 1025 | } |
| 1026 | |
| 1027 | if (use_sync) { |
| 1028 | sprintf(buf, "Engine=preadv2\n"); |
| 1029 | err = 0; |
| 1030 | } else if (!aio) { |
| 1031 | err = setup_ring(s); |
| 1032 | if (!err) |
| 1033 | sprintf(buf, "Engine=io_uring, sq_ring=%d, cq_ring=%d\n", *s->sq_ring.ring_entries, *s->cq_ring.ring_entries); |
| 1034 | } else { |
| 1035 | sprintf(buf, "Engine=aio\n"); |
| 1036 | err = setup_aio(s); |
| 1037 | } |
| 1038 | if (err) { |
| 1039 | printf("queue setup failed: %s, %d\n", strerror(errno), err); |
| 1040 | return -1; |
| 1041 | } |
| 1042 | |
| 1043 | if (!init_printed) { |
| 1044 | printf("polled=%d, fixedbufs=%d, register_files=%d, buffered=%d, QD=%d\n", polled, fixedbufs, register_files, buffered, depth); |
| 1045 | printf("%s", buf); |
| 1046 | init_printed = 1; |
| 1047 | } |
| 1048 | |
| 1049 | if (stats) { |
| 1050 | nr_batch = roundup_pow2(depth / batch_submit); |
| 1051 | if (nr_batch < 2) |
| 1052 | nr_batch = 2; |
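		/*
		 * nr_batch stays a power of two so the submit paths can wrap
		 * clock_index with a mask instead of a modulo.
		 */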
| 1053 | s->clock_batch = calloc(nr_batch, sizeof(unsigned long)); |
| 1054 | s->clock_index = 1; |
| 1055 | |
| 1056 | s->plat = calloc(PLAT_NR, sizeof(unsigned long)); |
| 1057 | } else { |
| 1058 | s->clock_batch = NULL; |
| 1059 | s->plat = NULL; |
| 1060 | nr_batch = 0; |
| 1061 | } |
	/*
	 * Perform the expensive command initialization for passthrough here
	 * rather than in the fast path.
	 */
| 1065 | if (pt) { |
| 1066 | for (i = 0; i < roundup_pow2(depth); i++) { |
| 1067 | struct io_uring_sqe *sqe = &s->sqes[i << 1]; |
| 1068 | |
| 1069 | memset(&sqe->cmd, 0, sizeof(struct nvme_uring_cmd)); |
| 1070 | } |
| 1071 | } |
| 1072 | return nr_batch; |
| 1073 | } |
| 1074 | |
| 1075 | #ifdef CONFIG_LIBAIO |
| 1076 | static int prep_more_ios_aio(struct submitter *s, int max_ios, struct iocb *iocbs) |
| 1077 | { |
| 1078 | uint64_t data; |
| 1079 | struct file *f; |
| 1080 | unsigned index; |
| 1081 | |
| 1082 | index = 0; |
| 1083 | while (index < max_ios) { |
| 1084 | struct iocb *iocb = &iocbs[index]; |
| 1085 | |
| 1086 | f = get_next_file(s); |
| 1087 | |
| 1088 | io_prep_pread(iocb, f->real_fd, s->iovecs[index].iov_base, |
| 1089 | s->iovecs[index].iov_len, get_offset(s, f)); |
| 1090 | |
| 1091 | data = f->fileno; |
| 1092 | if (stats && stats_running) |
| 1093 | data |= (((uint64_t) s->clock_index) << 32); |
| 1094 | iocb->data = (void *) (uintptr_t) data; |
| 1095 | index++; |
| 1096 | } |
| 1097 | return index; |
| 1098 | } |
| 1099 | |
| 1100 | static int reap_events_aio(struct submitter *s, struct io_event *events, int evs) |
| 1101 | { |
| 1102 | int last_idx = -1, stat_nr = 0; |
| 1103 | int reaped = 0; |
| 1104 | |
| 1105 | while (evs) { |
| 1106 | uint64_t data = (uintptr_t) events[reaped].data; |
| 1107 | struct file *f = &s->files[data & 0xffffffff]; |
| 1108 | |
| 1109 | f->pending_ios--; |
| 1110 | if (events[reaped].res != bs) { |
| 1111 | if (events[reaped].res == -ENODATA || |
| 1112 | events[reaped].res == -EIO) { |
| 1113 | s->io_errors++; |
| 1114 | } else { |
| 1115 | printf("io: unexpected ret=%ld\n", events[reaped].res); |
| 1116 | return -1; |
| 1117 | } |
| 1118 | } else if (stats) { |
| 1119 | int clock_index = data >> 32; |
| 1120 | |
| 1121 | if (last_idx != clock_index) { |
| 1122 | if (last_idx != -1) { |
| 1123 | add_stat(s, last_idx, stat_nr); |
| 1124 | stat_nr = 0; |
| 1125 | } |
| 1126 | last_idx = clock_index; |
| 1127 | } |
| 1128 | stat_nr++; |
| 1129 | } |
| 1130 | reaped++; |
| 1131 | evs--; |
| 1132 | } |
| 1133 | |
| 1134 | if (stat_nr) |
| 1135 | add_stat(s, last_idx, stat_nr); |
| 1136 | |
| 1137 | s->inflight -= reaped; |
| 1138 | s->done += reaped; |
| 1139 | return reaped; |
| 1140 | } |
| 1141 | |
| 1142 | static void *submitter_aio_fn(void *data) |
| 1143 | { |
| 1144 | struct submitter *s = data; |
| 1145 | int i, ret, prepped; |
| 1146 | struct iocb **iocbsptr; |
| 1147 | struct iocb *iocbs; |
| 1148 | struct io_event *events; |
| 1149 | #ifdef ARCH_HAVE_CPU_CLOCK |
| 1150 | int nr_batch; |
| 1151 | #endif |
| 1152 | |
| 1153 | ret = submitter_init(s); |
| 1154 | if (ret < 0) |
| 1155 | goto done; |
| 1156 | |
| 1157 | #ifdef ARCH_HAVE_CPU_CLOCK |
| 1158 | nr_batch = ret; |
| 1159 | #endif |
| 1160 | |
| 1161 | iocbsptr = calloc(depth, sizeof(struct iocb *)); |
| 1162 | iocbs = calloc(depth, sizeof(struct iocb)); |
| 1163 | events = calloc(depth, sizeof(struct io_event)); |
| 1164 | |
| 1165 | for (i = 0; i < depth; i++) |
| 1166 | iocbsptr[i] = &iocbs[i]; |
| 1167 | |
| 1168 | prepped = 0; |
| 1169 | do { |
| 1170 | int to_wait, to_submit, to_prep; |
| 1171 | |
| 1172 | if (!prepped && s->inflight < depth) { |
| 1173 | to_prep = min(depth - s->inflight, batch_submit); |
| 1174 | prepped = prep_more_ios_aio(s, to_prep, iocbs); |
| 1175 | #ifdef ARCH_HAVE_CPU_CLOCK |
| 1176 | if (prepped && stats) { |
| 1177 | s->clock_batch[s->clock_index] = get_cpu_clock(); |
| 1178 | s->clock_index = (s->clock_index + 1) & (nr_batch - 1); |
| 1179 | } |
| 1180 | #endif |
| 1181 | } |
| 1182 | s->inflight += prepped; |
| 1183 | to_submit = prepped; |
| 1184 | |
| 1185 | if (to_submit && (s->inflight + to_submit <= depth)) |
| 1186 | to_wait = 0; |
| 1187 | else |
| 1188 | to_wait = min(s->inflight + to_submit, batch_complete); |
| 1189 | |
| 1190 | ret = io_submit(s->aio_ctx, to_submit, iocbsptr); |
| 1191 | s->calls++; |
| 1192 | if (ret < 0) { |
| 1193 | perror("io_submit"); |
| 1194 | break; |
| 1195 | } else if (ret != to_submit) { |
| 1196 | printf("submitted %d, wanted %d\n", ret, to_submit); |
| 1197 | break; |
| 1198 | } |
| 1199 | prepped = 0; |
| 1200 | |
| 1201 | while (to_wait) { |
| 1202 | int r; |
| 1203 | |
| 1204 | s->calls++; |
| 1205 | r = io_getevents(s->aio_ctx, to_wait, to_wait, events, NULL); |
| 1206 | if (r < 0) { |
| 1207 | perror("io_getevents"); |
| 1208 | break; |
| 1209 | } else if (r != to_wait) { |
| 1210 | printf("r=%d, wait=%d\n", r, to_wait); |
| 1211 | break; |
| 1212 | } |
| 1213 | r = reap_events_aio(s, events, r); |
| 1214 | s->reaps += r; |
| 1215 | to_wait -= r; |
| 1216 | } |
| 1217 | } while (!s->finish); |
| 1218 | |
| 1219 | free(iocbsptr); |
| 1220 | free(iocbs); |
| 1221 | free(events); |
| 1222 | done: |
| 1223 | finish = 1; |
| 1224 | return NULL; |
| 1225 | } |
| 1226 | #endif |
| 1227 | |
| 1228 | static void io_uring_unregister_ring(struct submitter *s) |
| 1229 | { |
| 1230 | struct io_uring_rsrc_update up = { |
| 1231 | .offset = s->enter_ring_fd, |
| 1232 | }; |
| 1233 | |
| 1234 | syscall(__NR_io_uring_register, s->ring_fd, IORING_UNREGISTER_RING_FDS, |
| 1235 | &up, 1); |
| 1236 | } |
| 1237 | |
| 1238 | static int io_uring_register_ring(struct submitter *s) |
| 1239 | { |
| 1240 | struct io_uring_rsrc_update up = { |
| 1241 | .data = s->ring_fd, |
| 1242 | .offset = -1U, |
| 1243 | }; |
| 1244 | int ret; |
| 1245 | |
| 1246 | ret = syscall(__NR_io_uring_register, s->ring_fd, |
| 1247 | IORING_REGISTER_RING_FDS, &up, 1); |
| 1248 | if (ret == 1) { |
| 1249 | s->enter_ring_fd = up.offset; |
| 1250 | return 0; |
| 1251 | } |
| 1252 | register_ring = 0; |
| 1253 | return -1; |
| 1254 | } |
| 1255 | |
| 1256 | static void *submitter_uring_fn(void *data) |
| 1257 | { |
| 1258 | struct submitter *s = data; |
| 1259 | struct io_sq_ring *ring = &s->sq_ring; |
| 1260 | int ret, prepped; |
| 1261 | #ifdef ARCH_HAVE_CPU_CLOCK |
| 1262 | int nr_batch; |
| 1263 | #endif |
| 1264 | |
| 1265 | ret = submitter_init(s); |
| 1266 | if (ret < 0) |
| 1267 | goto done; |
| 1268 | |
| 1269 | #ifdef ARCH_HAVE_CPU_CLOCK |
| 1270 | nr_batch = ret; |
| 1271 | #endif |
| 1272 | |
| 1273 | if (register_ring) |
| 1274 | io_uring_register_ring(s); |
| 1275 | |
| 1276 | prepped = 0; |
| 1277 | do { |
| 1278 | int to_wait, to_submit, this_reap, to_prep; |
| 1279 | unsigned ring_flags = 0; |
| 1280 | |
| 1281 | if (!prepped && s->inflight < depth) { |
| 1282 | to_prep = min(depth - s->inflight, batch_submit); |
| 1283 | prepped = prep_more_ios_uring(s, to_prep); |
| 1284 | #ifdef ARCH_HAVE_CPU_CLOCK |
| 1285 | if (prepped && stats) { |
| 1286 | s->clock_batch[s->clock_index] = get_cpu_clock(); |
| 1287 | s->clock_index = (s->clock_index + 1) & (nr_batch - 1); |
| 1288 | } |
| 1289 | #endif |
| 1290 | } |
| 1291 | s->inflight += prepped; |
| 1292 | submit_more: |
| 1293 | to_submit = prepped; |
| 1294 | submit: |
| 1295 | if (to_submit && (s->inflight + to_submit <= depth)) |
| 1296 | to_wait = 0; |
| 1297 | else |
| 1298 | to_wait = min(s->inflight + to_submit, batch_complete); |
| 1299 | |
| 1300 | /* |
| 1301 | * Only need to call io_uring_enter if we're not using SQ thread |
| 1302 | * poll, or if IORING_SQ_NEED_WAKEUP is set. |
| 1303 | */ |
| 1304 | if (sq_thread_poll) |
| 1305 | ring_flags = atomic_load_acquire(ring->flags); |
| 1306 | if (!sq_thread_poll || ring_flags & IORING_SQ_NEED_WAKEUP) { |
| 1307 | unsigned flags = 0; |
| 1308 | |
| 1309 | if (to_wait) |
| 1310 | flags = IORING_ENTER_GETEVENTS; |
| 1311 | if (ring_flags & IORING_SQ_NEED_WAKEUP) |
| 1312 | flags |= IORING_ENTER_SQ_WAKEUP; |
| 1313 | ret = io_uring_enter(s, to_submit, to_wait, flags); |
| 1314 | s->calls++; |
| 1315 | } else { |
| 1316 | /* for SQPOLL, we submitted it all effectively */ |
| 1317 | ret = to_submit; |
| 1318 | } |
| 1319 | |
| 1320 | /* |
| 1321 | * For non SQ thread poll, we already got the events we needed |
| 1322 | * through the io_uring_enter() above. For SQ thread poll, we |
| 1323 | * need to loop here until we find enough events. |
| 1324 | */ |
| 1325 | this_reap = 0; |
| 1326 | do { |
| 1327 | int r; |
| 1328 | |
| 1329 | if (pt) |
| 1330 | r = reap_events_uring_pt(s); |
| 1331 | else |
| 1332 | r = reap_events_uring(s); |
| 1333 | if (r == -1) { |
| 1334 | s->finish = 1; |
| 1335 | break; |
| 1336 | } else if (r > 0) |
| 1337 | this_reap += r; |
| 1338 | } while (sq_thread_poll && this_reap < to_wait); |
| 1339 | s->reaps += this_reap; |
| 1340 | |
| 1341 | if (ret >= 0) { |
| 1342 | if (!ret) { |
| 1343 | to_submit = 0; |
| 1344 | if (s->inflight) |
| 1345 | goto submit; |
| 1346 | continue; |
| 1347 | } else if (ret < to_submit) { |
| 1348 | int diff = to_submit - ret; |
| 1349 | |
| 1350 | s->done += ret; |
| 1351 | prepped -= diff; |
| 1352 | goto submit_more; |
| 1353 | } |
| 1354 | s->done += ret; |
| 1355 | prepped = 0; |
| 1356 | continue; |
| 1357 | } else if (ret < 0) { |
| 1358 | if (errno == EAGAIN) { |
| 1359 | if (s->finish) |
| 1360 | break; |
| 1361 | if (this_reap) |
| 1362 | goto submit; |
| 1363 | to_submit = 0; |
| 1364 | goto submit; |
| 1365 | } |
| 1366 | printf("io_submit: %s\n", strerror(errno)); |
| 1367 | break; |
| 1368 | } |
| 1369 | } while (!s->finish); |
| 1370 | |
| 1371 | if (register_ring) |
| 1372 | io_uring_unregister_ring(s); |
| 1373 | |
| 1374 | done: |
| 1375 | finish = 1; |
| 1376 | return NULL; |
| 1377 | } |
| 1378 | |
| 1379 | #ifdef CONFIG_PWRITEV2 |
| 1380 | static void *submitter_sync_fn(void *data) |
| 1381 | { |
| 1382 | struct submitter *s = data; |
| 1383 | int ret; |
| 1384 | |
| 1385 | if (submitter_init(s) < 0) |
| 1386 | goto done; |
| 1387 | |
| 1388 | do { |
| 1389 | uint64_t offset; |
| 1390 | struct file *f; |
| 1391 | |
| 1392 | f = get_next_file(s); |
| 1393 | |
| 1394 | #ifdef ARCH_HAVE_CPU_CLOCK |
| 1395 | if (stats) |
| 1396 | s->clock_batch[s->clock_index] = get_cpu_clock(); |
| 1397 | #endif |
| 1398 | |
| 1399 | s->inflight++; |
| 1400 | s->calls++; |
| 1401 | |
| 1402 | offset = get_offset(s, f); |
| 1403 | if (polled) |
| 1404 | ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, RWF_HIPRI); |
| 1405 | else |
| 1406 | ret = preadv2(f->real_fd, &s->iovecs[0], 1, offset, 0); |
| 1407 | |
| 1408 | if (ret < 0) { |
| 1409 | perror("preadv2"); |
| 1410 | break; |
| 1411 | } else if (ret != bs) { |
| 1412 | break; |
| 1413 | } |
| 1414 | |
| 1415 | s->done++; |
| 1416 | s->inflight--; |
| 1417 | f->pending_ios--; |
| 1418 | if (stats) |
| 1419 | add_stat(s, s->clock_index, 1); |
| 1420 | } while (!s->finish); |
| 1421 | |
| 1422 | done: |
| 1423 | finish = 1; |
| 1424 | return NULL; |
| 1425 | } |
| 1426 | #else |
| 1427 | static void *submitter_sync_fn(void *data) |
| 1428 | { |
| 1429 | finish = 1; |
| 1430 | return NULL; |
| 1431 | } |
| 1432 | #endif |
| 1433 | |
| 1434 | static struct submitter *get_submitter(int offset) |
| 1435 | { |
| 1436 | void *ret; |
| 1437 | |
| 1438 | ret = submitter; |
| 1439 | if (offset) |
		/* stride must match the per-submitter allocation in main() */
		ret += offset * (sizeof(*submitter) +
				 roundup_pow2(depth) * sizeof(struct iovec));
| 1441 | return ret; |
| 1442 | } |
| 1443 | |
| 1444 | static void do_finish(const char *reason) |
| 1445 | { |
| 1446 | int j; |
| 1447 | |
| 1448 | printf("Exiting on %s\n", reason); |
| 1449 | for (j = 0; j < nthreads; j++) { |
| 1450 | struct submitter *s = get_submitter(j); |
| 1451 | s->finish = 1; |
| 1452 | } |
| 1453 | if (max_iops > 1000000) { |
| 1454 | double miops = (double) max_iops / 1000000.0; |
| 1455 | printf("Maximum IOPS=%.2fM\n", miops); |
| 1456 | } else if (max_iops > 100000) { |
| 1457 | double kiops = (double) max_iops / 1000.0; |
| 1458 | printf("Maximum IOPS=%.2fK\n", kiops); |
| 1459 | } else { |
| 1460 | printf("Maximum IOPS=%lu\n", max_iops); |
| 1461 | } |
| 1462 | finish = 1; |
| 1463 | } |
| 1464 | |
| 1465 | static void sig_int(int sig) |
| 1466 | { |
| 1467 | do_finish("signal"); |
| 1468 | } |
| 1469 | |
| 1470 | static void arm_sig_int(void) |
| 1471 | { |
| 1472 | struct sigaction act; |
| 1473 | |
| 1474 | memset(&act, 0, sizeof(act)); |
| 1475 | act.sa_handler = sig_int; |
| 1476 | act.sa_flags = SA_RESTART; |
| 1477 | sigaction(SIGINT, &act, NULL); |
| 1478 | |
| 1479 | /* Windows uses SIGBREAK as a quit signal from other applications */ |
| 1480 | #ifdef WIN32 |
| 1481 | sigaction(SIGBREAK, &act, NULL); |
| 1482 | #endif |
| 1483 | } |
| 1484 | |
| 1485 | static void usage(char *argv, int status) |
| 1486 | { |
| 1487 | char runtime_str[16]; |
| 1488 | snprintf(runtime_str, sizeof(runtime_str), "%d", runtime); |
| 1489 | printf("%s [options] -- [filenames]\n" |
| 1490 | " -d <int> : IO Depth, default %d\n" |
| 1491 | " -s <int> : Batch submit, default %d\n" |
| 1492 | " -c <int> : Batch complete, default %d\n" |
| 1493 | " -b <int> : Block size, default %d\n" |
| 1494 | " -p <bool> : Polled IO, default %d\n" |
| 1495 | " -B <bool> : Fixed buffers, default %d\n" |
| 1496 | " -F <bool> : Register files, default %d\n" |
| 1497 | " -n <int> : Number of threads, default %d\n" |
| 1498 | " -O <bool> : Use O_DIRECT, default %d\n" |
| 1499 | " -N <bool> : Perform just no-op requests, default %d\n" |
| 1500 | " -t <bool> : Track IO latencies, default %d\n" |
| 1501 | " -T <int> : TSC rate in HZ\n" |
| 1502 | " -r <int> : Runtime in seconds, default %s\n" |
| 1503 | " -R <bool> : Use random IO, default %d\n" |
| 1504 | " -a <bool> : Use legacy aio, default %d\n" |
| 1505 | " -S <bool> : Use sync IO (preadv2), default %d\n" |
| 1506 | " -X <bool> : Use registered ring %d\n" |
| 1507 | " -P <bool> : Automatically place on device home node %d\n" |
| 1508 | " -u <bool> : Use nvme-passthrough I/O, default %d\n", |
| 1509 | argv, DEPTH, BATCH_SUBMIT, BATCH_COMPLETE, BS, polled, |
| 1510 | fixedbufs, register_files, nthreads, !buffered, do_nop, |
| 1511 | stats, runtime == 0 ? "unlimited" : runtime_str, random_io, aio, |
| 1512 | use_sync, register_ring, numa_placement, pt); |
| 1513 | exit(status); |
| 1514 | } |
| 1515 | |
| 1516 | static void read_tsc_rate(void) |
| 1517 | { |
| 1518 | char buffer[32]; |
| 1519 | int fd, ret; |
| 1520 | |
| 1521 | if (tsc_rate) |
| 1522 | return; |
| 1523 | |
| 1524 | fd = open(TSC_RATE_FILE, O_RDONLY); |
| 1525 | if (fd < 0) |
| 1526 | return; |
| 1527 | |
| 1528 | ret = read(fd, buffer, sizeof(buffer)); |
| 1529 | if (ret < 0) { |
| 1530 | close(fd); |
| 1531 | return; |
| 1532 | } |
| 1533 | |
| 1534 | tsc_rate = strtoul(buffer, NULL, 10); |
| 1535 | printf("Using TSC rate %luHz\n", tsc_rate); |
| 1536 | close(fd); |
| 1537 | } |
| 1538 | |
| 1539 | static void write_tsc_rate(void) |
| 1540 | { |
| 1541 | char buffer[32]; |
| 1542 | struct stat sb; |
| 1543 | int fd, ret; |
| 1544 | |
| 1545 | if (!stat(TSC_RATE_FILE, &sb)) |
| 1546 | return; |
| 1547 | |
| 1548 | fd = open(TSC_RATE_FILE, O_WRONLY | O_CREAT, 0644); |
| 1549 | if (fd < 0) |
| 1550 | return; |
| 1551 | |
| 1552 | memset(buffer, 0, sizeof(buffer)); |
| 1553 | sprintf(buffer, "%lu", tsc_rate); |
| 1554 | ret = write(fd, buffer, strlen(buffer)); |
| 1555 | if (ret < 0) |
| 1556 | perror("write"); |
| 1557 | close(fd); |
| 1558 | } |
| 1559 | |
| 1560 | int main(int argc, char *argv[]) |
| 1561 | { |
| 1562 | struct submitter *s; |
| 1563 | unsigned long done, calls, reap, io_errors; |
| 1564 | int i, j, flags, fd, opt, threads_per_f, threads_rem = 0, nfiles; |
| 1565 | struct file f; |
| 1566 | void *ret; |
| 1567 | |
| 1568 | if (!do_nop && argc < 2) |
| 1569 | usage(argv[0], 1); |
| 1570 | |
| 1571 | while ((opt = getopt(argc, argv, "d:s:c:b:p:B:F:n:N:O:t:T:a:r:D:R:X:S:P:u:h?")) != -1) { |
| 1572 | switch (opt) { |
| 1573 | case 'a': |
| 1574 | aio = !!atoi(optarg); |
| 1575 | break; |
| 1576 | case 'd': |
| 1577 | depth = atoi(optarg); |
| 1578 | break; |
| 1579 | case 's': |
| 1580 | batch_submit = atoi(optarg); |
| 1581 | if (!batch_submit) |
| 1582 | batch_submit = 1; |
| 1583 | break; |
| 1584 | case 'c': |
| 1585 | batch_complete = atoi(optarg); |
| 1586 | if (!batch_complete) |
| 1587 | batch_complete = 1; |
| 1588 | break; |
| 1589 | case 'b': |
| 1590 | bs = atoi(optarg); |
| 1591 | break; |
| 1592 | case 'p': |
| 1593 | polled = !!atoi(optarg); |
| 1594 | break; |
| 1595 | case 'B': |
| 1596 | fixedbufs = !!atoi(optarg); |
| 1597 | break; |
| 1598 | case 'F': |
| 1599 | register_files = !!atoi(optarg); |
| 1600 | break; |
| 1601 | case 'n': |
| 1602 | nthreads = atoi(optarg); |
| 1603 | if (!nthreads) { |
| 1604 | printf("Threads must be non-zero\n"); |
| 1605 | usage(argv[0], 1); |
| 1606 | } |
| 1607 | break; |
| 1608 | case 'N': |
| 1609 | do_nop = !!atoi(optarg); |
| 1610 | break; |
| 1611 | case 'O': |
| 1612 | buffered = !atoi(optarg); |
| 1613 | break; |
| 1614 | case 't': |
| 1615 | #ifndef ARCH_HAVE_CPU_CLOCK |
| 1616 | fprintf(stderr, "Stats not supported on this CPU\n"); |
| 1617 | return 1; |
| 1618 | #endif |
| 1619 | stats = !!atoi(optarg); |
| 1620 | break; |
| 1621 | case 'T': |
| 1622 | #ifndef ARCH_HAVE_CPU_CLOCK |
| 1623 | fprintf(stderr, "Stats not supported on this CPU\n"); |
| 1624 | return 1; |
| 1625 | #endif |
| 1626 | tsc_rate = strtoul(optarg, NULL, 10); |
| 1627 | write_tsc_rate(); |
| 1628 | break; |
| 1629 | case 'r': |
| 1630 | runtime = atoi(optarg); |
| 1631 | break; |
| 1632 | case 'R': |
| 1633 | random_io = !!atoi(optarg); |
| 1634 | break; |
| 1635 | case 'X': |
| 1636 | register_ring = !!atoi(optarg); |
| 1637 | break; |
| 1638 | case 'S': |
| 1639 | #ifdef CONFIG_PWRITEV2 |
| 1640 | use_sync = !!atoi(optarg); |
| 1641 | #else |
| 1642 | fprintf(stderr, "preadv2 not supported\n"); |
| 1643 | exit(1); |
| 1644 | #endif |
| 1645 | break; |
| 1646 | case 'P': |
| 1647 | numa_placement = !!atoi(optarg); |
| 1648 | break; |
| 1649 | case 'u': |
| 1650 | pt = !!atoi(optarg); |
| 1651 | break; |
| 1652 | case 'h': |
| 1653 | case '?': |
| 1654 | default: |
| 1655 | usage(argv[0], 0); |
| 1656 | break; |
| 1657 | } |
| 1658 | } |
| 1659 | |
| 1660 | if (stats) |
| 1661 | read_tsc_rate(); |
| 1662 | |
| 1663 | if (batch_complete > depth) |
| 1664 | batch_complete = depth; |
| 1665 | if (batch_submit > depth) |
| 1666 | batch_submit = depth; |
| 1667 | |
| 1668 | submitter = calloc(nthreads, sizeof(*submitter) + |
| 1669 | roundup_pow2(depth) * sizeof(struct iovec)); |
| 1670 | for (j = 0; j < nthreads; j++) { |
| 1671 | s = get_submitter(j); |
| 1672 | s->numa_node = -1; |
| 1673 | s->index = j; |
| 1674 | s->done = s->calls = s->reaps = s->io_errors = 0; |
| 1675 | } |
| 1676 | |
| 1677 | flags = O_RDONLY | O_NOATIME; |
| 1678 | if (!buffered) |
| 1679 | flags |= O_DIRECT; |
| 1680 | |
| 1681 | j = 0; |
| 1682 | i = optind; |
| 1683 | nfiles = argc - i; |
| 1684 | if (!do_nop) { |
| 1685 | if (!nfiles) { |
| 1686 | printf("No files specified\n"); |
| 1687 | usage(argv[0], 1); |
| 1688 | } |
| 1689 | threads_per_f = nthreads / nfiles; |
| 1690 | /* make sure each thread gets assigned files */ |
| 1691 | if (threads_per_f == 0) { |
| 1692 | threads_per_f = 1; |
| 1693 | } else { |
| 1694 | threads_rem = nthreads - threads_per_f * nfiles; |
| 1695 | } |
| 1696 | } |
| 1697 | while (!do_nop && i < argc) { |
| 1698 | int k, limit; |
| 1699 | |
| 1700 | memset(&f, 0, sizeof(f)); |
| 1701 | |
| 1702 | fd = open(argv[i], flags); |
| 1703 | if (fd < 0) { |
| 1704 | perror("open"); |
| 1705 | return 1; |
| 1706 | } |
| 1707 | f.real_fd = fd; |
| 1708 | if (get_file_size(&f)) { |
| 1709 | printf("failed getting size of device/file\n"); |
| 1710 | return 1; |
| 1711 | } |
| 1712 | if (f.max_blocks <= 1) { |
| 1713 | printf("Zero file/device size?\n"); |
| 1714 | return 1; |
| 1715 | } |
| 1716 | f.max_blocks--; |
| 1717 | |
| 1718 | limit = threads_per_f; |
| 1719 | limit += threads_rem > 0 ? 1 : 0; |
| 1720 | for (k = 0; k < limit; k++) { |
| 1721 | s = get_submitter((j + k) % nthreads); |
| 1722 | |
| 1723 | if (s->nr_files == MAX_FDS) { |
| 1724 | printf("Max number of files (%d) reached\n", MAX_FDS); |
| 1725 | break; |
| 1726 | } |
| 1727 | |
| 1728 | memcpy(&s->files[s->nr_files], &f, sizeof(f)); |
| 1729 | |
| 1730 | if (numa_placement) |
| 1731 | detect_node(s, argv[i]); |
| 1732 | |
| 1733 | s->filename = argv[i]; |
| 1734 | s->nr_files++; |
| 1735 | } |
| 1736 | threads_rem--; |
| 1737 | i++; |
| 1738 | j += limit; |
| 1739 | } |
| 1740 | |
| 1741 | arm_sig_int(); |
| 1742 | |
| 1743 | t_io_uring_page_size = sysconf(_SC_PAGESIZE); |
| 1744 | if (t_io_uring_page_size < 0) |
| 1745 | t_io_uring_page_size = 4096; |
| 1746 | |
| 1747 | for (j = 0; j < nthreads; j++) { |
| 1748 | s = get_submitter(j); |
| 1749 | if (use_sync) |
| 1750 | pthread_create(&s->thread, NULL, submitter_sync_fn, s); |
| 1751 | else if (!aio) |
| 1752 | pthread_create(&s->thread, NULL, submitter_uring_fn, s); |
| 1753 | #ifdef CONFIG_LIBAIO |
| 1754 | else |
| 1755 | pthread_create(&s->thread, NULL, submitter_aio_fn, s); |
| 1756 | #endif |
| 1757 | } |
| 1758 | |
| 1759 | reap = calls = done = io_errors = 0; |
| 1760 | do { |
| 1761 | unsigned long this_done = 0; |
| 1762 | unsigned long this_reap = 0; |
| 1763 | unsigned long this_call = 0; |
| 1764 | unsigned long this_io_errors = 0; |
| 1765 | unsigned long rpc = 0, ipc = 0; |
| 1766 | unsigned long iops, bw; |
| 1767 | |
| 1768 | sleep(1); |
| 1769 | if (runtime && !--runtime) |
| 1770 | do_finish("timeout"); |
| 1771 | |
| 1772 | /* don't print partial run, if interrupted by signal */ |
| 1773 | if (finish) |
| 1774 | break; |
| 1775 | |
		/* one second into the run, enable stats */
| 1777 | if (stats) |
| 1778 | stats_running = 1; |
| 1779 | |
| 1780 | for (j = 0; j < nthreads; j++) { |
| 1781 | s = get_submitter(j); |
| 1782 | this_done += s->done; |
| 1783 | this_call += s->calls; |
| 1784 | this_reap += s->reaps; |
| 1785 | this_io_errors += s->io_errors; |
| 1786 | } |
| 1787 | if (this_call - calls) { |
| 1788 | rpc = (this_done - done) / (this_call - calls); |
| 1789 | ipc = (this_reap - reap) / (this_call - calls); |
| 1790 | } else |
| 1791 | rpc = ipc = -1; |
| 1792 | iops = this_done - done; |
| 1793 | iops -= this_io_errors - io_errors; |
| 1794 | if (bs > 1048576) |
| 1795 | bw = iops * (bs / 1048576); |
| 1796 | else |
| 1797 | bw = iops / (1048576 / bs); |
| 1798 | if (iops > 1000000) { |
| 1799 | double miops = (double) iops / 1000000.0; |
| 1800 | printf("IOPS=%.2fM, ", miops); |
| 1801 | } else if (iops > 100000) { |
| 1802 | double kiops = (double) iops / 1000.0; |
| 1803 | printf("IOPS=%.2fK, ", kiops); |
| 1804 | } else { |
| 1805 | printf("IOPS=%lu, ", iops); |
| 1806 | } |
| 1807 | max_iops = max(max_iops, iops); |
| 1808 | if (!do_nop) { |
| 1809 | if (bw > 2000) { |
| 1810 | double bw_g = (double) bw / 1000.0; |
| 1811 | |
| 1812 | printf("BW=%.2fGiB/s, ", bw_g); |
| 1813 | } else { |
| 1814 | printf("BW=%luMiB/s, ", bw); |
| 1815 | } |
| 1816 | } |
| 1817 | printf("IOS/call=%ld/%ld\n", rpc, ipc); |
| 1818 | done = this_done; |
| 1819 | calls = this_call; |
| 1820 | reap = this_reap; |
| 1821 | io_errors = this_io_errors; |
| 1822 | } while (!finish); |
| 1823 | |
| 1824 | for (j = 0; j < nthreads; j++) { |
| 1825 | s = get_submitter(j); |
| 1826 | pthread_join(s->thread, &ret); |
| 1827 | close(s->ring_fd); |
| 1828 | |
| 1829 | if (s->io_errors) |
| 1830 | printf("%d: %lu IO errors\n", s->tid, s->io_errors); |
| 1831 | |
| 1832 | if (stats) { |
| 1833 | unsigned long nr; |
| 1834 | |
| 1835 | printf("%d: Latency percentiles:\n", s->tid); |
| 1836 | for (i = 0, nr = 0; i < PLAT_NR; i++) |
| 1837 | nr += s->plat[i]; |
| 1838 | show_clat_percentiles(s->plat, nr, 4); |
| 1839 | free(s->clock_batch); |
| 1840 | free(s->plat); |
| 1841 | } |
| 1842 | } |
| 1843 | |
| 1844 | free(submitter); |
| 1845 | return 0; |
| 1846 | } |