/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/sysmacros.h>

#include "flist.h"
#include "fio.h"
#include "iolog.h"
#include "blktrace.h"
#include "blktrace_api.h"
#include "oslib/linux-dev-lookup.h"

struct file_cache {
	unsigned int maj;
	unsigned int min;
	unsigned int fileno;
};

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(FILE *f, struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	if (fseek(f, t->pdu_len, SEEK_CUR) < 0)
		return -errno;

	return t->pdu_len;
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
bool is_blktrace(const char *filename, int *need_swap)
{
	struct blk_io_trace t;
	int fd, ret;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		return false;

	ret = read(fd, &t, sizeof(t));
	close(fd);

	if (ret < 0) {
		perror("read blktrace");
		return false;
	} else if (ret != sizeof(t)) {
		log_err("fio: short read on blktrace file\n");
		return false;
	}

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 0;
		return true;
	}

	/*
	 * Maybe it needs to be endian swapped...
	 */
	t.magic = fio_swap32(t.magic);
	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 1;
		return true;
	}

	return false;
}
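
/*
 * A quick illustration (editorial, not part of the original source):
 * the magic word is 0x65617400 ("eat\0"), so the top three bytes
 * identify the format and the low byte carries BLK_IO_TRACE_VERSION.
 * A sketch of the combined check:
 */
static inline bool example_trace_magic_ok(__u32 magic)
{
	return (magic & 0xffffff00) == BLK_IO_TRACE_MAGIC &&
	       (magic & 0xff) == BLK_IO_TRACE_VERSION;
}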

#define FMINORBITS	20
#define FMINORMASK	((1U << FMINORBITS) - 1)
#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))
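
/*
 * Worked example (illustrative): the kernel packs a device number as
 * 12 major bits above 20 minor bits, so for device == (8 << 20) | 48,
 * FMAJOR() yields 8 and FMINOR() yields 48 -- major 8, minor 48 is
 * /dev/sdd under the usual sd driver numbering.
 */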

static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}

static int trace_add_file(struct thread_data *td, __u32 device,
			  struct file_cache *cache)
{
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	char dev[256];
	unsigned int i;

	if (cache->maj == maj && cache->min == min)
		return cache->fileno;

	cache->maj = maj;
	cache->min = min;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i)
		if (f->major == maj && f->minor == min) {
			cache->fileno = f->fileno;
			return cache->fileno;
		}

	strcpy(dev, "/dev");
	if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
		int fileno;

		if (td->o.replay_redirect)
			dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
					" with: %s\n", maj, min,
					td->o.replay_redirect);
		else
			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);

		dprint(FD_BLKTRACE, "add device %s\n", dev);
		fileno = add_file_exclusive(td, dev);
		td->o.open_files++;
		td->files[fileno]->major = maj;
		td->files[fileno]->minor = min;
		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
		cache->fileno = fileno;
	}

	return cache->fileno;
}

static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
{
	if (!o->replay_align)
		return;

	t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
}
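
/*
 * Editorial note: the mask expression above rounds t->bytes up to the
 * next multiple of replay_align, which therefore must be a power of
 * two. E.g. with replay_align = 4096, a 4097-byte request becomes
 * (4097 + 4095) & ~4095 = 8192.
 */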

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->offset = offset * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
			ipo->ddir, ipo->offset, ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}
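
/*
 * Units worth noting above (editorial): blktrace reports sector
 * offsets in 512-byte units and timestamps in nanoseconds, while an
 * io_piece carries byte offsets and microsecond delays -- hence the
 * "* 512" and "/ 1000" conversions.
 */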

static bool handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_MESSAGE:
		break;
	default:
		dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
		break;
	}
	return false;
}

static bool handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned long long *bs,
				 struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_TRIM))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);
	fileno = trace_add_file(td, t->device, cache);

	ios[DDIR_TRIM]++;
	if (t->bytes > bs[DDIR_TRIM])
		bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	INIT_FLIST_HEAD(&ipo->list);

	ipo->offset = t->sector * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
			ipo->offset, ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
	return true;
}

static void dump_trace(struct blk_io_trace *t)
{
	log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action);
}

static bool handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
			    unsigned long long *bs, struct file_cache *cache)
{
	int rw;
	int fileno;

	fileno = trace_add_file(td, t->device, cache);

	/* 0 for a read, 1 for a write; doubles as the DDIR_* index */
	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (rw) {
		if (td->o.replay_skip & (1u << DDIR_WRITE))
			return false;
	} else {
		if (td->o.replay_skip & (1u << DDIR_READ))
			return false;
	}

	if (!t->bytes) {
		if (!fio_did_warn(FIO_WARN_BTRACE_ZERO))
			dump_trace(t);
		return false;
	}

	if (t->bytes > bs[rw])
		bs[rw] = t->bytes;

	ios[rw]++;
	td->o.size += t->bytes;
	store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
	return true;
}

static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
			       unsigned long long ttime, unsigned long *ios,
			       struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_SYNC))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);
	fileno = trace_add_file(td, t->device, cache);

	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_SYNC;
	ipo->fileno = fileno;

	ios[DDIR_SYNC]++;
	dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay);

	if (!(td->flags & TD_F_SYNCS))
		td->flags |= TD_F_SYNCS;

	queue_io_piece(td, ipo);
	return true;
}
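
/*
 * Editorial note: the TD_F_SYNCS flag set above marks the job as
 * carrying sync operations, so the replay side expects DDIR_SYNC
 * io_pieces to appear in the log.
 */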

/*
 * We only care for queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static bool queue_trace(struct thread_data *td, struct blk_io_trace *t,
			unsigned long *ios, unsigned long long *bs,
			struct file_cache *cache)
{
	unsigned long long *last_ttime = &td->io_log_last_ttime;
	unsigned long long delay = 0;

	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return false;

	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
		delay = delay_since_ttime(td, t->time);
		*last_ttime = t->time;
	}

	t_bytes_align(&td->o, t);

	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		return handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return handle_trace_discard(td, t, delay, ios, bs, cache);
	else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH))
		return handle_trace_flush(td, t, delay, ios, cache);
	else
		return handle_trace_fs(td, t, delay, ios, bs, cache);
}
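
/*
 * Background for the bit tests above (editorial, per blktrace_api.h):
 * the low 16 bits of t->action hold the event type (__BLK_TA_QUEUE and
 * friends) while the high 16 bits hold BLK_TC_* category flags shifted
 * up by BLK_TC_ACT(). A queued write, for instance, arrives as:
 *
 *	t->action == BLK_TC_ACT(BLK_TC_QUEUE | BLK_TC_WRITE) | __BLK_TA_QUEUE
 */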

static void byteswap_trace(struct blk_io_trace *t)
{
	t->magic = fio_swap32(t->magic);
	t->sequence = fio_swap32(t->sequence);
	t->time = fio_swap64(t->time);
	t->sector = fio_swap64(t->sector);
	t->bytes = fio_swap32(t->bytes);
	t->action = fio_swap32(t->action);
	t->pid = fio_swap32(t->pid);
	t->device = fio_swap32(t->device);
	t->cpu = fio_swap32(t->cpu);
	t->error = fio_swap16(t->error);
	t->pdu_len = fio_swap16(t->pdu_len);
}

static bool t_is_write(struct blk_io_trace *t)
{
	return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
}

static enum fio_ddir t_get_ddir(struct blk_io_trace *t)
{
	if (t->action & BLK_TC_ACT(BLK_TC_READ))
		return DDIR_READ;
	else if (t->action & BLK_TC_ACT(BLK_TC_WRITE))
		return DDIR_WRITE;
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return DDIR_TRIM;

	return DDIR_INVAL;
}

static void depth_inc(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]++;
}

static void depth_dec(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]--;
}

static void depth_end(struct blk_io_trace *t, int *this_depth, int *depth)
{
	enum fio_ddir ddir = DDIR_INVAL;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL) {
		depth[ddir] = max(depth[ddir], this_depth[ddir]);
		this_depth[ddir] = 0;
	}
}
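
/*
 * Editorial note on the three helpers above: together they probe the
 * queue depth the traced workload achieved. Each QUEUE event raises
 * the per-direction in-flight count, each front/back merge lowers it
 * (merged requests never complete on their own), and each COMPLETE
 * latches the running peak into depth[] for use as the replay iodepth.
 */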

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
bool init_blktrace_read(struct thread_data *td, const char *filename, int need_swap)
{
	int old_state;

	td->io_log_rfile = fopen(filename, "rb");
	if (!td->io_log_rfile) {
		td_verror(td, errno, "open blktrace file");
		goto err;
	}
	td->io_log_blktrace_swap = need_swap;
	td->io_log_last_ttime = 0;
	td->o.size = 0;

	free_release_files(td);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	if (!read_blktrace(td))
		goto err;

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return false;
	}

	return true;
err:
	if (td->io_log_rfile) {
		fclose(td->io_log_rfile);
		td->io_log_rfile = NULL;
	}
	return false;
}

bool read_blktrace(struct thread_data *td)
{
	struct blk_io_trace t;
	struct file_cache cache = {
		.maj = ~0U,
		.min = ~0U,
	};
	unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { };
	unsigned long long rw_bs[DDIR_RWDIR_CNT] = { };
	unsigned long skipped_writes;
	FILE *f = td->io_log_rfile;
	int i, max_depth;
	struct fio_file *fiof;
	int this_depth[DDIR_RWDIR_CNT] = { };
	int depth[DDIR_RWDIR_CNT] = { };
	int64_t items_to_fetch = 0;

	if (td->o.read_iolog_chunked) {
		items_to_fetch = iolog_items_to_fetch(td);
		if (!items_to_fetch)
			return true;
	}

	skipped_writes = 0;
	do {
		int ret = fread(&t, 1, sizeof(t), f);

		if (ferror(f)) {
			td_verror(td, errno, "read blktrace file");
			goto err;
		} else if (feof(f)) {
			break;
		} else if (ret < (int) sizeof(t)) {
			log_err("fio: iolog short read\n");
			break;
		}

		if (td->io_log_blktrace_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(f, &t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err;
		}

		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		if (!queue_trace(td, &t, ios, rw_bs, &cache))
			continue;

		if (td->o.read_iolog_chunked) {
			td->io_log_current++;
			items_to_fetch--;
			if (items_to_fetch == 0)
				break;
		}
	} while (1);

	if (td->o.read_iolog_chunked) {
		td->io_log_highmark = td->io_log_current;
		td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
		fio_gettime(&td->io_log_highmark_time, NULL);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (td->o.read_iolog_chunked) {
		if (td->io_log_current == 0) {
			return false;
		}
		td->o.td_ddir = TD_DDIR_RW;
		if ((rw_bs[DDIR_READ] > td->o.max_bs[DDIR_READ] ||
		     rw_bs[DDIR_WRITE] > td->o.max_bs[DDIR_WRITE] ||
		     rw_bs[DDIR_TRIM] > td->o.max_bs[DDIR_TRIM]) &&
		    td->orig_buffer) {
			td->o.max_bs[DDIR_READ] = max(td->o.max_bs[DDIR_READ], rw_bs[DDIR_READ]);
			td->o.max_bs[DDIR_WRITE] = max(td->o.max_bs[DDIR_WRITE], rw_bs[DDIR_WRITE]);
			td->o.max_bs[DDIR_TRIM] = max(td->o.max_bs[DDIR_TRIM], rw_bs[DDIR_TRIM]);
			io_u_quiesce(td);
			free_io_mem(td);
			if (init_io_u_buffers(td))
				return false;
		}
		return true;
	}

	for_each_file(td, fiof, i)
		trace_add_open_close_event(td, fiof->fileno, FIO_LOG_CLOSE_FILE);

	fclose(td->io_log_rfile);
	td->io_log_rfile = NULL;

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	max_depth = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE] && !ios[DDIR_TRIM] &&
	    !ios[DDIR_SYNC]) {
		log_err("fio: found no ios in blktrace data\n");
		return false;
	}

	td->o.td_ddir = 0;
	if (ios[DDIR_READ]) {
		td->o.td_ddir |= TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	}
	if (ios[DDIR_WRITE]) {
		td->o.td_ddir |= TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	}
	if (ios[DDIR_TRIM]) {
		td->o.td_ddir |= TD_DDIR_TRIM;
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return true;
err:
	fclose(td->io_log_rfile);
	td->io_log_rfile = NULL;
	return false;
}
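
/*
 * Typical use of this replay path from a job file (illustrative
 * values, not from the original source):
 *
 *	[replay]
 *	read_iolog=/tmp/device.trace.bin
 *	replay_redirect=/dev/nullb0
 *
 * is_blktrace() detects the binary format, init_blktrace_read() loads
 * the file, and the io_pieces queued above drive the actual replay.
 */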

static int init_merge_param_list(fio_fp64_t *vals, struct blktrace_cursor *bcs,
				 int nr_logs, int def, size_t off)
{
	int i = 0, len = 0;

	while (len < FIO_IO_U_LIST_MAX_LEN && vals[len].u.f != 0.0)
		len++;

	if (len && len != nr_logs)
		return len;

	for (i = 0; i < nr_logs; i++) {
		int *val = (int *)((char *)&bcs[i] + off);
		*val = def;
		if (len)
			*val = (int)vals[i].u.f;
	}

	return 0;
}
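
/*
 * Editorial note: the offsetof()-based `off' argument lets this one
 * helper fill either per-cursor field (scalar or nr_iter) from the
 * matching option list, falling back to `def' when the list is empty.
 * A non-zero return is the mismatched list length, which the callers
 * below report.
 */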

static int find_earliest_io(struct blktrace_cursor *bcs, int nr_logs)
{
	__u64 time = ~(__u64)0;
	int idx = 0, i;

	for (i = 0; i < nr_logs; i++) {
		if (bcs[i].t.time < time) {
			time = bcs[i].t.time;
			idx = i;
		}
	}

	return idx;
}
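
/*
 * Editorial note: this linear min-scan makes the merge loop below a
 * k-way merge by timestamp; with a handful of logs that is cheaper
 * and simpler than maintaining a heap.
 */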

static void merge_finish_file(struct blktrace_cursor *bcs, int i, int *nr_logs)
{
	bcs[i].iter++;
	if (bcs[i].iter < bcs[i].nr_iter) {
		fseek(bcs[i].f, 0, SEEK_SET);
		return;
	}

	*nr_logs -= 1;

	/* close file */
	fclose(bcs[i].f);

	/* keep active files contiguous */
	memmove(&bcs[i], &bcs[*nr_logs], sizeof(bcs[i]));
}

static int read_trace(struct thread_data *td, struct blktrace_cursor *bc)
{
	int ret = 0;
	struct blk_io_trace *t = &bc->t;

read_skip:
	/* read an io trace */
	ret = fread(t, 1, sizeof(*t), bc->f);
	if (ferror(bc->f)) {
		td_verror(td, errno, "read blktrace file");
		ret = -1;
		goto err;
	} else if (feof(bc->f)) {
		/* remember the log length for subsequent iterations */
		if (!bc->length)
			bc->length = bc->t.time;
		ret = 0;
		goto err;
	} else if (ret < (int) sizeof(*t)) {
		log_err("fio: iolog short read\n");
		ret = -1;
		goto err;
	}

	if (bc->swap)
		byteswap_trace(t);

	/* skip over actions that fio does not care about */
	if ((t->action & 0xffff) != __BLK_TA_QUEUE ||
	    t_get_ddir(t) == DDIR_INVAL) {
		ret = discard_pdu(bc->f, t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err;
		}
		goto read_skip;
	}

	t->time = (t->time + bc->iter * bc->length) * bc->scalar / 100;

err:
	return ret;
}

static int write_trace(FILE *fp, struct blk_io_trace *t)
{
	/* pdu is not used so just write out only the io trace */
	t->pdu_len = 0;
	return fwrite((void *)t, sizeof(*t), 1, fp);
}

int merge_blktrace_iologs(struct thread_data *td)
{
	int nr_logs = get_max_str_idx(td->o.read_iolog_file);
	struct blktrace_cursor *bcs = malloc(sizeof(struct blktrace_cursor) *
					     nr_logs);
	struct blktrace_cursor *bc;
	FILE *merge_fp;
	char *str, *ptr, *name, *merge_buf;
	int i, ret;

	ret = init_merge_param_list(td->o.merge_blktrace_scalars, bcs, nr_logs,
				    100, offsetof(struct blktrace_cursor,
						  scalar));
	if (ret) {
		log_err("fio: merge_blktrace_scalars(%d) != nr_logs(%d)\n",
			ret, nr_logs);
		goto err_param;
	}

	ret = init_merge_param_list(td->o.merge_blktrace_iters, bcs, nr_logs,
				    1, offsetof(struct blktrace_cursor,
						nr_iter));
	if (ret) {
		log_err("fio: merge_blktrace_iters(%d) != nr_logs(%d)\n",
			ret, nr_logs);
		goto err_param;
	}

	/* setup output file */
	merge_fp = fopen(td->o.merge_blktrace_file, "w");
	if (!merge_fp) {
		ret = -errno;
		goto err_param;
	}
	merge_buf = malloc(128 * 1024);
	if (!merge_buf)
		goto err_out_file;
	ret = setvbuf(merge_fp, merge_buf, _IOFBF, 128 * 1024);
	if (ret)
		goto err_merge_buf;

	/* setup input files */
	str = ptr = strdup(td->o.read_iolog_file);
	nr_logs = 0;
	for (i = 0; (name = get_next_str(&ptr)) != NULL; i++) {
		bcs[i].f = fopen(name, "rb");
		if (!bcs[i].f) {
			log_err("fio: could not open file: %s\n", name);
			ret = -errno;
			free(str);
			goto err_file;
		}
		nr_logs++;

		if (!is_blktrace(name, &bcs[i].swap)) {
			log_err("fio: file is not a blktrace: %s\n", name);
			free(str);
			goto err_file;
		}

		ret = read_trace(td, &bcs[i]);
		if (ret < 0) {
			free(str);
			goto err_file;
		} else if (!ret) {
			merge_finish_file(bcs, i, &nr_logs);
			i--;
		}
	}
	free(str);

	/* merge files */
	while (nr_logs) {
		i = find_earliest_io(bcs, nr_logs);
		bc = &bcs[i];
		/* skip over the pdu */
		ret = discard_pdu(bc->f, &bc->t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err_file;
		}

		ret = write_trace(merge_fp, &bc->t);
		ret = read_trace(td, bc);
		if (ret < 0)
			goto err_file;
		else if (!ret)
			merge_finish_file(bcs, i, &nr_logs);
	}

	/* set iolog file to read from the newly merged file */
	td->o.read_iolog_file = td->o.merge_blktrace_file;
	ret = 0;

err_file:
	/* cleanup */
	for (i = 0; i < nr_logs; i++) {
		fclose(bcs[i].f);
	}
err_merge_buf:
	free(merge_buf);
err_out_file:
	fflush(merge_fp);
	fclose(merge_fp);
err_param:
	free(bcs);

	return ret;
}
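
/*
 * Illustrative job options driving this merge path (example values,
 * not from the original source):
 *
 *	read_iolog=dev0.bin:dev1.bin
 *	merge_blktrace_file=merged.bin
 *	merge_blktrace_scalars=50:100
 *	merge_blktrace_iters=2:1
 *
 * This compresses dev0's timestamps to 50% (replaying it at twice the
 * recorded speed) for two loops, merges it with one loop of dev1 at
 * recorded speed, and writes the result to merged.bin for replay.
 */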