/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

#include "flist.h"
#include "fio.h"
#include "iolog.h"
#include "blktrace.h"
#include "blktrace_api.h"
#include "oslib/linux-dev-lookup.h"

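/*
 * Cache of the most recently looked up device, so back-to-back traces
 * against the same major/minor don't have to rescan the file list.
 */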
struct file_cache {
	unsigned int maj;
	unsigned int min;
	unsigned int fileno;
};

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(FILE *f, struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	if (fseek(f, t->pdu_len, SEEK_CUR) < 0)
		return -errno;

	return t->pdu_len;
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
bool is_blktrace(const char *filename, int *need_swap)
{
	struct blk_io_trace t;
	int fd, ret;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		return false;

	ret = read(fd, &t, sizeof(t));
	close(fd);

	if (ret < 0) {
		perror("read blktrace");
		return false;
	} else if (ret != sizeof(t)) {
		log_err("fio: short read on blktrace file\n");
		return false;
	}

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 0;
		return true;
	}

	/*
	 * Maybe it needs to be endian swapped...
	 */
	t.magic = fio_swap32(t.magic);
	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 1;
		return true;
	}

	return false;
}

#define FMINORBITS	20
#define FMINORMASK	((1U << FMINORBITS) - 1)
#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))

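/*
 * Queue a synthetic open/close event in the io log, so replay opens and
 * closes the file at the appropriate point.
 */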
static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}

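/*
 * Map the trace device (major/minor) to a fio file index, adding the
 * device (or the replay_redirect override) to the file list the first
 * time it is seen.
 */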
static int trace_add_file(struct thread_data *td, __u32 device,
			  struct file_cache *cache)
{
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	char dev[256];
	unsigned int i;

	if (cache->maj == maj && cache->min == min)
		return cache->fileno;

	cache->maj = maj;
	cache->min = min;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i)
		if (f->major == maj && f->minor == min) {
			cache->fileno = f->fileno;
			return cache->fileno;
		}

	strcpy(dev, "/dev");
	if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
		int fileno;

		if (td->o.replay_redirect)
			dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
					" with: %s\n", maj, min,
					td->o.replay_redirect);
		else
			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);

		dprint(FD_BLKTRACE, "add device %s\n", dev);
		fileno = add_file_exclusive(td, dev);
		td->o.open_files++;
		td->files[fileno]->major = maj;
		td->files[fileno]->minor = min;
		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
		cache->fileno = fileno;
	}

	return cache->fileno;
}

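/*
 * Round the trace byte count up to the requested replay alignment.
 */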
static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
{
	if (!o->replay_align)
		return;

	t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
}

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->offset = offset * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
	       ipo->ddir, ipo->offset,
	       ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}

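/*
 * Notify traces carry no io to replay, just log them for debugging.
 */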
static bool handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
		       t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
		       t->action, t->pid);
		break;
	case BLK_TN_MESSAGE:
		break;
	default:
		dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
		break;
	}
	return false;
}

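/*
 * Turn a discard trace into a DDIR_TRIM io_piece, unless trim replay
 * is being skipped.
 */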
static bool handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned long long *bs,
				 struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_TRIM))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);
	fileno = trace_add_file(td, t->device, cache);

	ios[DDIR_TRIM]++;
	if (t->bytes > bs[DDIR_TRIM])
		bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	INIT_FLIST_HEAD(&ipo->list);

	ipo->offset = t->sector * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
	       ipo->offset, ipo->len,
	       ipo->delay);
	queue_io_piece(td, ipo);
	return true;
}

static void dump_trace(struct blk_io_trace *t)
{
	log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action);
}

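/*
 * Handle a regular read/write trace, storing it as an io_piece.
 */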
static bool handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
			    unsigned long long *bs, struct file_cache *cache)
{
	int rw;
	int fileno;

	fileno = trace_add_file(td, t->device, cache);

	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (rw) {
		if (td->o.replay_skip & (1u << DDIR_WRITE))
			return false;
	} else {
		if (td->o.replay_skip & (1u << DDIR_READ))
			return false;
	}

	if (!t->bytes) {
		if (!fio_did_warn(FIO_WARN_BTRACE_ZERO))
			dump_trace(t);
		return false;
	}

	if (t->bytes > bs[rw])
		bs[rw] = t->bytes;

	ios[rw]++;
	td->o.size += t->bytes;
	store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
	return true;
}

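/*
 * Handle a flush trace, storing it as a DDIR_SYNC io_piece.
 */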
static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
			       unsigned long long ttime, unsigned long *ios,
			       struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_SYNC))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);
	fileno = trace_add_file(td, t->device, cache);

	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_SYNC;
	ipo->fileno = fileno;

	ios[DDIR_SYNC]++;
	dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay);
	queue_io_piece(td, ipo);
	return true;
}

/*
 * We only care about queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static bool queue_trace(struct thread_data *td, struct blk_io_trace *t,
			unsigned long *ios, unsigned long long *bs,
			struct file_cache *cache)
{
	unsigned long long *last_ttime = &td->io_log_blktrace_last_ttime;
	unsigned long long delay = 0;

	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return false;

	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
		if (!*last_ttime || td->o.no_stall || t->time < *last_ttime)
			delay = 0;
		else if (td->o.replay_time_scale == 100)
			delay = t->time - *last_ttime;
		else {
			double tmp = t->time - *last_ttime;
			double scale;

			scale = (double) 100.0 / (double) td->o.replay_time_scale;
			tmp *= scale;
			delay = tmp;
		}
		*last_ttime = t->time;
	}

	t_bytes_align(&td->o, t);

	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		return handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return handle_trace_discard(td, t, delay, ios, bs, cache);
	else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH))
		return handle_trace_flush(td, t, delay, ios, cache);
	else
		return handle_trace_fs(td, t, delay, ios, bs, cache);
}

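/*
 * Swap the fields of a trace that was recorded with the opposite endianness.
 */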
static void byteswap_trace(struct blk_io_trace *t)
{
	t->magic = fio_swap32(t->magic);
	t->sequence = fio_swap32(t->sequence);
	t->time = fio_swap64(t->time);
	t->sector = fio_swap64(t->sector);
	t->bytes = fio_swap32(t->bytes);
	t->action = fio_swap32(t->action);
	t->pid = fio_swap32(t->pid);
	t->device = fio_swap32(t->device);
	t->cpu = fio_swap32(t->cpu);
	t->error = fio_swap16(t->error);
	t->pdu_len = fio_swap16(t->pdu_len);
}

static bool t_is_write(struct blk_io_trace *t)
{
	return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
}

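/*
 * Translate the trace action mask into a fio data direction.
 */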
static enum fio_ddir t_get_ddir(struct blk_io_trace *t)
{
	if (t->action & BLK_TC_ACT(BLK_TC_READ))
		return DDIR_READ;
	else if (t->action & BLK_TC_ACT(BLK_TC_WRITE))
		return DDIR_WRITE;
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return DDIR_TRIM;

	return DDIR_INVAL;
}

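/*
 * Per-direction queue depth tracking: bump on a queue event, drop on a
 * merge, and record the high-water mark on completion.
 */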
static void depth_inc(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]++;
}

static void depth_dec(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]--;
}

static void depth_end(struct blk_io_trace *t, int *this_depth, int *depth)
{
	enum fio_ddir ddir = DDIR_INVAL;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL) {
		depth[ddir] = max(depth[ddir], this_depth[ddir]);
		this_depth[ddir] = 0;
	}
}

/*
 * Open a blktrace file for replay. The blk_io_trace entries are read by
 * read_blktrace() and stored as io_pieces, like the fio text iolog would do.
 */
bool init_blktrace_read(struct thread_data *td, const char *filename, int need_swap)
{
	int old_state;

	td->io_log_rfile = fopen(filename, "rb");
	if (!td->io_log_rfile) {
		td_verror(td, errno, "open blktrace file");
		goto err;
	}
	td->io_log_blktrace_swap = need_swap;
	td->io_log_blktrace_last_ttime = 0;
	td->o.size = 0;

	free_release_files(td);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	if (!read_blktrace(td))
		goto err;

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return false;
	}

	return true;

err:
	if (td->io_log_rfile) {
		fclose(td->io_log_rfile);
		td->io_log_rfile = NULL;
	}
	return false;
}

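/*
 * Read the next chunk of the blktrace file (or all of it, when not reading
 * the iolog chunked), queueing io_pieces and probing the block sizes and
 * queue depth to use for replay.
 */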
bool read_blktrace(struct thread_data *td)
{
	struct blk_io_trace t;
	struct file_cache cache = { };
	unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { };
	unsigned long long rw_bs[DDIR_RWDIR_CNT] = { };
	unsigned long skipped_writes;
	FILE *f = td->io_log_rfile;
	int i, max_depth;
	struct fio_file *fiof;
	int this_depth[DDIR_RWDIR_CNT] = { };
	int depth[DDIR_RWDIR_CNT] = { };
	int64_t items_to_fetch = 0;

	if (td->o.read_iolog_chunked) {
		items_to_fetch = iolog_items_to_fetch(td);
		if (!items_to_fetch)
			return true;
	}

	skipped_writes = 0;
	do {
		int ret = fread(&t, 1, sizeof(t), f);

		if (ferror(f)) {
			td_verror(td, errno, "read blktrace file");
			goto err;
		} else if (feof(f)) {
			break;
		} else if (ret < (int) sizeof(t)) {
			log_err("fio: iolog short read\n");
			break;
		}

		if (td->io_log_blktrace_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
				t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
				t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(f, &t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				 ((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		if (!queue_trace(td, &t, ios, rw_bs, &cache))
			continue;

		if (td->o.read_iolog_chunked) {
			td->io_log_current++;
			items_to_fetch--;
			if (items_to_fetch == 0)
				break;
		}
	} while (1);

	if (td->o.read_iolog_chunked) {
		td->io_log_highmark = td->io_log_current;
		td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
		fio_gettime(&td->io_log_highmark_time, NULL);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
			td->o.name, skipped_writes);

	if (td->o.read_iolog_chunked) {
		if (td->io_log_current == 0)
			return false;
		td->o.td_ddir = TD_DDIR_RW;
		if ((rw_bs[DDIR_READ] > td->o.max_bs[DDIR_READ] ||
		     rw_bs[DDIR_WRITE] > td->o.max_bs[DDIR_WRITE] ||
		     rw_bs[DDIR_TRIM] > td->o.max_bs[DDIR_TRIM]) &&
		    td->orig_buffer) {
			td->o.max_bs[DDIR_READ] = max(td->o.max_bs[DDIR_READ], rw_bs[DDIR_READ]);
			td->o.max_bs[DDIR_WRITE] = max(td->o.max_bs[DDIR_WRITE], rw_bs[DDIR_WRITE]);
			td->o.max_bs[DDIR_TRIM] = max(td->o.max_bs[DDIR_TRIM], rw_bs[DDIR_TRIM]);
			io_u_quiesce(td);
			free_io_mem(td);
			init_io_u_buffers(td);
		}
		return true;
	}

	for_each_file(td, fiof, i)
		trace_add_open_close_event(td, fiof->fileno, FIO_LOG_CLOSE_FILE);

	fclose(td->io_log_rfile);
	td->io_log_rfile = NULL;

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	max_depth = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE] && !ios[DDIR_TRIM] &&
	    !ios[DDIR_SYNC]) {
		log_err("fio: found no ios in blktrace data\n");
		return false;
	}

	td->o.td_ddir = 0;
	if (ios[DDIR_READ]) {
		td->o.td_ddir |= TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	}
	if (ios[DDIR_WRITE]) {
		td->o.td_ddir |= TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	}
	if (ios[DDIR_TRIM]) {
		td->o.td_ddir |= TD_DDIR_TRIM;
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return true;
err:
	fclose(f);
	td->io_log_rfile = NULL;
	return false;
}

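/*
 * Initialize the per-cursor field at offset 'off' from the values in 'vals',
 * falling back to 'def' when no values were given. Returns the number of
 * values on a mismatch with nr_logs, 0 on success.
 */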
static int init_merge_param_list(fio_fp64_t *vals, struct blktrace_cursor *bcs,
				 int nr_logs, int def, size_t off)
{
	int i = 0, len = 0;

	while (len < FIO_IO_U_LIST_MAX_LEN && vals[len].u.f != 0.0)
		len++;

	if (len && len != nr_logs)
		return len;

	for (i = 0; i < nr_logs; i++) {
		int *val = (int *)((char *)&bcs[i] + off);
		*val = def;
		if (len)
			*val = (int)vals[i].u.f;
	}

	return 0;
}

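/*
 * Return the index of the cursor holding the trace with the earliest time.
 */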
static int find_earliest_io(struct blktrace_cursor *bcs, int nr_logs)
{
	__u64 time = ~(__u64)0;
	int idx = 0, i;

	for (i = 0; i < nr_logs; i++) {
		if (bcs[i].t.time < time) {
			time = bcs[i].t.time;
			idx = i;
		}
	}

	return idx;
}

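/*
 * Move a cursor to its next iteration, or close its file and compact the
 * cursor array once all iterations are done.
 */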
static void merge_finish_file(struct blktrace_cursor *bcs, int i, int *nr_logs)
{
	bcs[i].iter++;
	if (bcs[i].iter < bcs[i].nr_iter) {
		fseek(bcs[i].f, 0, SEEK_SET);
		return;
	}

	*nr_logs -= 1;

	/* close file */
	fclose(bcs[i].f);

	/* keep active files contiguous */
	memmove(&bcs[i], &bcs[*nr_logs], sizeof(bcs[i]));
}

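/*
 * Read the next queue trace from a cursor, skipping actions fio does not
 * replay, and scale its time by the iteration and scalar settings.
 */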
static int read_trace(struct thread_data *td, struct blktrace_cursor *bc)
{
	int ret = 0;
	struct blk_io_trace *t = &bc->t;

read_skip:
	/* read an io trace */
	ret = fread(t, 1, sizeof(*t), bc->f);
	if (ferror(bc->f)) {
		td_verror(td, errno, "read blktrace file");
		return ret;
	} else if (feof(bc->f)) {
		if (!bc->length)
			bc->length = bc->t.time;
		return ret;
	} else if (ret < (int) sizeof(*t)) {
		log_err("fio: iolog short read\n");
		return -1;
	}

	if (bc->swap)
		byteswap_trace(t);

	/* skip over actions that fio does not care about */
	if ((t->action & 0xffff) != __BLK_TA_QUEUE ||
	    t_get_ddir(t) == DDIR_INVAL) {
		ret = discard_pdu(bc->f, t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			return ret;
		}
		goto read_skip;
	}

	t->time = (t->time + bc->iter * bc->length) * bc->scalar / 100;

	return ret;
}

static int write_trace(FILE *fp, struct blk_io_trace *t)
{
	/* pdu is not used so just write out only the io trace */
	t->pdu_len = 0;
	return fwrite((void *)t, sizeof(*t), 1, fp);
}

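/*
 * Merge multiple blktrace files into one time-ordered trace, then point
 * the iolog replay at the merged result.
 */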
int merge_blktrace_iologs(struct thread_data *td)
{
	int nr_logs = get_max_str_idx(td->o.read_iolog_file);
	struct blktrace_cursor *bcs = malloc(sizeof(struct blktrace_cursor) *
					     nr_logs);
	struct blktrace_cursor *bc;
	FILE *merge_fp;
	char *str, *ptr, *name, *merge_buf;
	int i, ret;

	ret = init_merge_param_list(td->o.merge_blktrace_scalars, bcs, nr_logs,
				    100, offsetof(struct blktrace_cursor,
						  scalar));
	if (ret) {
		log_err("fio: merge_blktrace_scalars(%d) != nr_logs(%d)\n",
			ret, nr_logs);
		goto err_param;
	}

	ret = init_merge_param_list(td->o.merge_blktrace_iters, bcs, nr_logs,
				    1, offsetof(struct blktrace_cursor,
						nr_iter));
	if (ret) {
		log_err("fio: merge_blktrace_iters(%d) != nr_logs(%d)\n",
			ret, nr_logs);
		goto err_param;
	}

	/* setup output file */
	merge_fp = fopen(td->o.merge_blktrace_file, "w");
	if (!merge_fp) {
		ret = -errno;
		log_err("fio: could not open merge output file: %s\n",
			td->o.merge_blktrace_file);
		goto err_param;
	}
	merge_buf = malloc(128 * 1024);
	if (!merge_buf) {
		ret = -ENOMEM;
		goto err_out_file;
	}
	ret = setvbuf(merge_fp, merge_buf, _IOFBF, 128 * 1024);
	if (ret)
		goto err_merge_buf;

	/* setup input files */
	str = ptr = strdup(td->o.read_iolog_file);
	nr_logs = 0;
	for (i = 0; (name = get_next_str(&ptr)) != NULL; i++) {
		bcs[i].f = fopen(name, "rb");
		if (!bcs[i].f) {
			log_err("fio: could not open file: %s\n", name);
			ret = -errno;
			free(str);
			goto err_file;
		}
		nr_logs++;

		if (!is_blktrace(name, &bcs[i].swap)) {
			log_err("fio: file is not a blktrace: %s\n", name);
			free(str);
			goto err_file;
		}

		ret = read_trace(td, &bcs[i]);
		if (ret < 0) {
			free(str);
			goto err_file;
		} else if (!ret) {
			merge_finish_file(bcs, i, &nr_logs);
			i--;
		}
	}
	free(str);

	/* merge files */
	while (nr_logs) {
		i = find_earliest_io(bcs, nr_logs);
		bc = &bcs[i];
		/* skip over the pdu */
		ret = discard_pdu(bc->f, &bc->t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err_file;
		}

		ret = write_trace(merge_fp, &bc->t);
		if (ret != 1) {
			log_err("fio: failed to write merged trace\n");
			ret = -EIO;
			goto err_file;
		}

		ret = read_trace(td, bc);
		if (ret < 0)
			goto err_file;
		else if (!ret)
			merge_finish_file(bcs, i, &nr_logs);
	}

	/* set iolog file to read from the newly merged file */
	td->o.read_iolog_file = td->o.merge_blktrace_file;
	ret = 0;

err_file:
	/* cleanup */
	for (i = 0; i < nr_logs; i++) {
		fclose(bcs[i].f);
	}
err_merge_buf:
	free(merge_buf);
err_out_file:
	fflush(merge_fp);
	fclose(merge_fp);
err_param:
	free(bcs);

	return ret;
}