/*
 * blktrace support code for fio
 */
#include "blktrace_api.h"
#include "oslib/linux-dev-lookup.h"
/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(FILE *f, struct blk_io_trace *t)
{
	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	if (fseek(f, t->pdu_len, SEEK_CUR) < 0)
		return -errno;

	return t->pdu_len;
}
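/*
 * Illustrative note (not part of the original source): a blktrace binary
 * stream is a sequence of fixed-size struct blk_io_trace headers, each
 * optionally followed by t->pdu_len bytes of payload that fio does not
 * need for replay. A reader that did want the payload would do something
 * like the sketch below; the buffer name and size are hypothetical.
 *
 *	char pdu[64];
 *
 *	if (t->pdu_len && t->pdu_len <= sizeof(pdu) &&
 *	    fread(pdu, 1, t->pdu_len, f) != t->pdu_len)
 *		return -1;
 */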
/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
bool is_blktrace(const char *filename, int *need_swap)
{
	struct blk_io_trace t;
	int fd, ret;

	fd = open(filename, O_RDONLY);
	ret = read(fd, &t, sizeof(t));
	close(fd);
	if (ret < 0) {
		perror("read blktrace");
		return false;
	} else if (ret != sizeof(t)) {
		log_err("fio: short read on blktrace file\n");
		return false;
	}

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 0;
		return true;
	}

	/*
	 * Maybe it needs to be endian swapped...
	 */
	t.magic = fio_swap32(t.magic);
	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 1;
		return true;
	}

	return false;
}
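/*
 * Usage sketch (illustrative; the file name is hypothetical): callers probe
 * the file once and remember whether the traces need byte swapping before
 * handing it to the replay setup.
 *
 *	int need_swap;
 *
 *	if (is_blktrace("trace.bin", &need_swap))
 *		init_blktrace_read(td, "trace.bin", need_swap);
 */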
#define FMINORBITS 20
#define FMINORMASK ((1U << FMINORBITS) - 1)
#define FMAJOR(dev) ((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev) ((unsigned int) ((dev) & FMINORMASK))
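/*
 * Worked example: with FMINORBITS == 20, a __u32 device value of
 * (8 << 20) | 16 decodes to FMAJOR() == 8 and FMINOR() == 16, which in the
 * usual SCSI disk numbering would be /dev/sdb.
 */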
static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}
static int trace_add_file(struct thread_data *td, __u32 device,
			  struct file_cache *cache)
{
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	char dev[256];
	unsigned int i;
	int fileno;

	if (cache->maj == maj && cache->min == min)
		return cache->fileno;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i)
		if (f->major == maj && f->minor == min) {
			cache->fileno = f->fileno;
			return cache->fileno;
		}

	strcpy(dev, "/dev");
	if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
		if (td->o.replay_redirect)
			dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
					" with: %s\n", maj, min,
					td->o.replay_redirect);
		else
			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);

		dprint(FD_BLKTRACE, "add device %s\n", dev);
		fileno = add_file_exclusive(td, dev);
		td->files[fileno]->major = maj;
		td->files[fileno]->minor = min;
		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
		cache->fileno = fileno;
	}

	return cache->fileno;
}
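/*
 * Note on the cache: blktrace data is typically millions of records against
 * a handful of devices, so caching the last (maj, min) to fileno mapping
 * avoids a file-list walk or device lookup per trace record. A minimal shape
 * consistent with how the cache is used here (an assumption, not the
 * canonical definition):
 *
 *	struct file_cache {
 *		unsigned int maj;
 *		unsigned int min;
 *		unsigned int fileno;
 *	};
 */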
static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
{
	if (!o->replay_align)
		return;

	t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
}
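/*
 * Worked example: with replay_align = 4096 (the round-up trick requires a
 * power of two), a 5000 byte request becomes (5000 + 4095) & ~4095 = 8192
 * bytes, while a request that is already a multiple of 4096 is unchanged.
 */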
/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));

	ipo->offset = offset * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
	       ipo->ddir, ipo->offset,
	       ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}
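/*
 * Unit reminder: blktrace sectors are always 512 bytes regardless of the
 * device's logical block size, and trace timestamps are in nanoseconds while
 * the stored replay delay is in microseconds; hence the "* 512" and "/ 1000"
 * above. For example, sector 2048 with a 1500000 ns inter-arrival time is
 * stored as offset 1048576 and delay 1500 (before any replay_scale or
 * replay_align adjustment).
 */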
static bool handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
				t->action, t->pid);
		break;
	default:
		dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
		break;
	}

	return false;
}
static bool handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned long long *bs,
				 struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_TRIM))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	fileno = trace_add_file(td, t->device, cache);

	ios[DDIR_TRIM]++;
	if (t->bytes > bs[DDIR_TRIM])
		bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	INIT_FLIST_HEAD(&ipo->list);

	ipo->offset = t->sector * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
	       ipo->offset, ipo->len,
	       ipo->delay);
	queue_io_piece(td, ipo);

	return true;
}
static void dump_trace(struct blk_io_trace *t)
{
	log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action);
}
static bool handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
			    unsigned long long *bs, struct file_cache *cache)
{
	int rw, fileno;

	fileno = trace_add_file(td, t->device, cache);

	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (rw) {
		if (td->o.replay_skip & (1u << DDIR_WRITE))
			return false;
	} else {
		if (td->o.replay_skip & (1u << DDIR_READ))
			return false;
	}

	if (!t->bytes) {
		if (!fio_did_warn(FIO_WARN_BTRACE_ZERO))
			dump_trace(t);
		return false;
	}

	if (t->bytes > bs[rw])
		bs[rw] = t->bytes;

	ios[rw]++;
	td->o.size += t->bytes;
	store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);

	return true;
}
static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
			       unsigned long long ttime, unsigned long *ios,
			       struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_SYNC))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	fileno = trace_add_file(td, t->device, cache);

	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_SYNC;
	ipo->fileno = fileno;

	ios[DDIR_SYNC]++;
	dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay);

	if (!(td->flags & TD_F_SYNCS))
		td->flags |= TD_F_SYNCS;

	queue_io_piece(td, ipo);

	return true;
}
/*
 * We only care for queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static bool queue_trace(struct thread_data *td, struct blk_io_trace *t,
			unsigned long *ios, unsigned long long *bs,
			struct file_cache *cache)
{
	unsigned long long *last_ttime = &td->io_log_last_ttime;
	unsigned long long delay = 0;

	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return false;

	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
		delay = delay_since_ttime(td, t->time);
		*last_ttime = t->time;
	}

	t_bytes_align(&td->o, t);

	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		return handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return handle_trace_discard(td, t, delay, ios, bs, cache);
	else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH))
		return handle_trace_flush(td, t, delay, ios, cache);
	else
		return handle_trace_fs(td, t, delay, ios, bs, cache);
}
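/*
 * Layout of t->action assumed from the blktrace ABI: the low 16 bits hold
 * the event type (__BLK_TA_QUEUE and friends) and the upper bits hold the
 * category mask shifted in by BLK_TC_ACT(). A queued write would therefore
 * look roughly like this (illustrative, not taken from a real trace):
 *
 *	t->action = BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_QUEUE) | __BLK_TA_QUEUE;
 */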
static void byteswap_trace(struct blk_io_trace *t)
{
	t->magic = fio_swap32(t->magic);
	t->sequence = fio_swap32(t->sequence);
	t->time = fio_swap64(t->time);
	t->sector = fio_swap64(t->sector);
	t->bytes = fio_swap32(t->bytes);
	t->action = fio_swap32(t->action);
	t->pid = fio_swap32(t->pid);
	t->device = fio_swap32(t->device);
	t->cpu = fio_swap32(t->cpu);
	t->error = fio_swap16(t->error);
	t->pdu_len = fio_swap16(t->pdu_len);
}
static bool t_is_write(struct blk_io_trace *t)
{
	return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
}
static enum fio_ddir t_get_ddir(struct blk_io_trace *t)
{
	if (t->action & BLK_TC_ACT(BLK_TC_READ))
		return DDIR_READ;
	else if (t->action & BLK_TC_ACT(BLK_TC_WRITE))
		return DDIR_WRITE;
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return DDIR_TRIM;

	return DDIR_INVAL;
}
static void depth_inc(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]++;
}

static void depth_dec(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]--;
}

static void depth_end(struct blk_io_trace *t, int *this_depth, int *depth)
{
	enum fio_ddir ddir = DDIR_INVAL;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL) {
		depth[ddir] = max(depth[ddir], this_depth[ddir]);
		this_depth[ddir] = 0;
	}
}
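/*
 * Depth probing example: per direction, the counter rises on every QUEUE
 * event, falls on BACKMERGE/FRONTMERGE (a merged request never becomes its
 * own queued io), and COMPLETE latches the high-water mark. A fragment of
 * Q, Q, Q, C on the read side therefore records a probed read depth of 3.
 */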
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
bool init_blktrace_read(struct thread_data *td, const char *filename, int need_swap)
{
	int old_state;

	td->io_log_rfile = fopen(filename, "rb");
	if (!td->io_log_rfile) {
		td_verror(td, errno, "open blktrace file");
		goto err;
	}
	td->io_log_blktrace_swap = need_swap;
	td->io_log_last_ttime = 0;

	free_release_files(td);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	if (!read_blktrace(td)) {
		goto err;
	}

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return false;
	}

	return true;

err:
	if (td->io_log_rfile) {
		fclose(td->io_log_rfile);
		td->io_log_rfile = NULL;
	}
	return false;
}
bool read_blktrace(struct thread_data *td)
{
	struct blk_io_trace t;
	struct file_cache cache = { };
	unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { };
	unsigned long long rw_bs[DDIR_RWDIR_CNT] = { };
	unsigned long skipped_writes = 0;
	FILE *f = td->io_log_rfile;
	struct fio_file *fiof;
	int this_depth[DDIR_RWDIR_CNT] = { };
	int depth[DDIR_RWDIR_CNT] = { };
	int i, max_depth = 0;
	int64_t items_to_fetch = 0;

	if (td->o.read_iolog_chunked) {
		items_to_fetch = iolog_items_to_fetch(td);
	}
	do {
		int ret = fread(&t, 1, sizeof(t), f);

		if (ferror(f)) {
			td_verror(td, errno, "read blktrace file");
			goto err;
		} else if (feof(f)) {
			break;
		} else if (ret < (int) sizeof(t)) {
			log_err("fio: iolog short read\n");
			break;
		}

		if (td->io_log_blktrace_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
				t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
				t.magic & 0xff);
			goto err;
		}

		ret = discard_pdu(f, &t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err;
		}

		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);
		}
		if (t_is_write(&t) && read_only) {
			skipped_writes++;
			continue;
		}

		if (!queue_trace(td, &t, ios, rw_bs, &cache))
			continue;

		if (td->o.read_iolog_chunked) {
			td->io_log_current++;
			items_to_fetch--;
			if (items_to_fetch == 0)
				break;
		}
	} while (1);

	if (td->o.read_iolog_chunked) {
		td->io_log_highmark = td->io_log_current;
		td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
		fio_gettime(&td->io_log_highmark_time, NULL);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
			td->o.name, skipped_writes);

	if (td->o.read_iolog_chunked) {
		if (td->io_log_current == 0) {
			return false;
		}
		td->o.td_ddir = TD_DDIR_RW;
		if ((rw_bs[DDIR_READ] > td->o.max_bs[DDIR_READ] ||
		     rw_bs[DDIR_WRITE] > td->o.max_bs[DDIR_WRITE] ||
		     rw_bs[DDIR_TRIM] > td->o.max_bs[DDIR_TRIM]) &&
		    td->orig_buffer) {
			td->o.max_bs[DDIR_READ] = max(td->o.max_bs[DDIR_READ], rw_bs[DDIR_READ]);
			td->o.max_bs[DDIR_WRITE] = max(td->o.max_bs[DDIR_WRITE], rw_bs[DDIR_WRITE]);
			td->o.max_bs[DDIR_TRIM] = max(td->o.max_bs[DDIR_TRIM], rw_bs[DDIR_TRIM]);
			init_io_u_buffers(td);
		}
		return true;
	}

	for_each_file(td, fiof, i)
		trace_add_open_close_event(td, fiof->fileno, FIO_LOG_CLOSE_FILE);

	fclose(td->io_log_rfile);
	td->io_log_rfile = NULL;
	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE] && !ios[DDIR_TRIM] &&
	    !ios[DDIR_SYNC]) {
		log_err("fio: found no ios in blktrace data\n");
		return false;
	}

	if (ios[DDIR_READ]) {
		td->o.td_ddir |= TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	}
	if (ios[DDIR_WRITE]) {
		td->o.td_ddir |= TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	}
	if (ios[DDIR_TRIM]) {
		td->o.td_ddir |= TD_DDIR_TRIM;
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return true;

err:
	fclose(td->io_log_rfile);
	td->io_log_rfile = NULL;
	return false;
}
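/*
 * Net effect of the probing above, as an illustration: a trace containing
 * only reads, with a largest request of 256 KiB and a probed depth of 32,
 * leaves the job as if it had asked for read replay with
 * max_bs[DDIR_READ] = 256k and iodepth = iodepth_low = 32, unless iodepth
 * was set explicitly in the job file.
 */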
static int init_merge_param_list(fio_fp64_t *vals, struct blktrace_cursor *bcs,
				 int nr_logs, int def, size_t off)
{
	int i = 0, len = 0;

	while (len < FIO_IO_U_LIST_MAX_LEN && vals[len].u.f != 0.0)
		len++;

	if (len && len != nr_logs)
		return len;

	for (i = 0; i < nr_logs; i++) {
		int *val = (int *)((char *)&bcs[i] + off);

		*val = def;
		if (len)
			*val = (int)vals[i].u.f;
	}

	return 0;
}
static int find_earliest_io(struct blktrace_cursor *bcs, int nr_logs)
{
	__u64 time = ~(__u64)0;
	int idx = 0, i;

	for (i = 0; i < nr_logs; i++) {
		if (bcs[i].t.time < time) {
			time = bcs[i].t.time;
			idx = i;
		}
	}

	return idx;
}
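/*
 * This linear scan is the heart of the N-way merge: each emitted record
 * costs O(nr_logs) comparisons, which is fine for the handful of logs fio
 * typically merges; a heap would only pay off for a very large nr_logs.
 */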
static void merge_finish_file(struct blktrace_cursor *bcs, int i, int *nr_logs)
{
	bcs[i].iter++;
	if (bcs[i].iter < bcs[i].nr_iter) {
		fseek(bcs[i].f, 0, SEEK_SET);
		return;
	}

	fclose(bcs[i].f);
	*nr_logs -= 1;

	/* keep active files contiguous */
	memmove(&bcs[i], &bcs[*nr_logs], sizeof(bcs[i]));
}
static int read_trace(struct thread_data *td, struct blktrace_cursor *bc)
{
	int ret = 0;
	struct blk_io_trace *t = &bc->t;

read_skip:
	/* read an io trace */
	ret = fread(t, 1, sizeof(*t), bc->f);
	if (ferror(bc->f)) {
		td_verror(td, errno, "read blktrace file");
		return -1;
	} else if (feof(bc->f)) {
		if (!bc->length)
			bc->length = bc->t.time;
		return 0;
	} else if (ret < (int) sizeof(*t)) {
		log_err("fio: iolog short read\n");
		return -1;
	}

	if (bc->swap)
		byteswap_trace(t);

	/* skip over actions that fio does not care about */
	if ((t->action & 0xffff) != __BLK_TA_QUEUE ||
	    t_get_ddir(t) == DDIR_INVAL) {
		ret = discard_pdu(bc->f, t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			return ret;
		}
		goto read_skip;
	}

	t->time = (t->time + bc->iter * bc->length) * bc->scalar / 100;

	return ret;
}
static int write_trace(FILE *fp, struct blk_io_trace *t)
{
	/* pdu is not used so just write out only the io trace */
	t->pdu_len = 0;
	return fwrite((void *)t, sizeof(*t), 1, fp);
}
int merge_blktrace_iologs(struct thread_data *td)
{
	int nr_logs = get_max_str_idx(td->o.read_iolog_file);
	struct blktrace_cursor *bcs = malloc(sizeof(struct blktrace_cursor) *
					     nr_logs);
	struct blktrace_cursor *bc;
	FILE *merge_fp;
	char *str, *ptr, *name, *merge_buf;
	int i, ret;

	ret = init_merge_param_list(td->o.merge_blktrace_scalars, bcs, nr_logs,
				    100, offsetof(struct blktrace_cursor,
						  scalar));
	if (ret) {
		log_err("fio: merge_blktrace_scalars(%d) != nr_logs(%d)\n",
			ret, nr_logs);
	}

	ret = init_merge_param_list(td->o.merge_blktrace_iters, bcs, nr_logs,
				    1, offsetof(struct blktrace_cursor,
						nr_iter));
	if (ret) {
		log_err("fio: merge_blktrace_iters(%d) != nr_logs(%d)\n",
			ret, nr_logs);
	}

	/* setup output file */
	merge_fp = fopen(td->o.merge_blktrace_file, "w");
	merge_buf = malloc(128 * 1024);
	ret = setvbuf(merge_fp, merge_buf, _IOFBF, 128 * 1024);
	/* setup input files */
	str = ptr = strdup(td->o.read_iolog_file);
	for (i = 0; (name = get_next_str(&ptr)) != NULL; i++) {
		bcs[i].f = fopen(name, "rb");
		if (!bcs[i].f) {
			log_err("fio: could not open file: %s\n", name);
			goto err_file;
		}

		if (!is_blktrace(name, &bcs[i].swap)) {
			log_err("fio: file is not a blktrace: %s\n", name);
			goto err_file;
		}

		ret = read_trace(td, &bcs[i]);
		if (ret < 0)
			goto err_file;
		else if (!ret)
			merge_finish_file(bcs, i, &nr_logs);
	}
	free(str);

	while (nr_logs) {
		i = find_earliest_io(bcs, nr_logs);
		bc = &bcs[i];

		/* skip over the pdu */
		ret = discard_pdu(bc->f, &bc->t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err_file;
		}

		ret = write_trace(merge_fp, &bc->t);
		ret = read_trace(td, bc);
		if (ret < 0)
			goto err_file;
		else if (!ret)
			merge_finish_file(bcs, i, &nr_logs);
	}

	/* set iolog file to read from the newly merged file */
	td->o.read_iolog_file = td->o.merge_blktrace_file;

err_file:
	for (i = 0; i < nr_logs; i++) {
		fclose(bcs[i].f);
	}
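/*
 * Usage sketch (option values are illustrative): merging is driven from the
 * job file, e.g. read_iolog=dev1.bin:dev2.bin together with
 * merge_blktrace_file=merged.bin, optionally scaled per log with
 * merge_blktrace_scalars=50:100 and repeated with merge_blktrace_iters=2:1.
 * Once this function succeeds, replay proceeds from the merged file via the
 * read_iolog_file reassignment above.
 */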