[fio.git] / blktrace.c
/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/sysmacros.h>

#include "flist.h"
#include "fio.h"
#include "iolog.h"
#include "blktrace.h"
#include "blktrace_api.h"
#include "oslib/linux-dev-lookup.h"

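/*
 * One-entry cache of the last device -> fileno mapping, so back-to-back
 * traces against the same device can skip the file list lookup.
 */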
struct file_cache {
	unsigned int maj;
	unsigned int min;
	unsigned int fileno;
};

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(FILE *f, struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	if (fseek(f, t->pdu_len, SEEK_CUR) < 0)
		return -errno;

	return t->pdu_len;
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
bool is_blktrace(const char *filename, int *need_swap)
{
	struct blk_io_trace t;
	int fd, ret;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		return false;

	ret = read(fd, &t, sizeof(t));
	close(fd);

	if (ret < 0) {
		perror("read blktrace");
		return false;
	} else if (ret != sizeof(t)) {
		log_err("fio: short read on blktrace file\n");
		return false;
	}

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 0;
		return true;
	}

	/*
	 * Maybe it needs to be endian swapped...
	 */
	t.magic = fio_swap32(t.magic);
	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 1;
		return true;
	}

	return false;
}

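/*
 * Decode the 32-bit device number in the trace: the low 20 bits hold the
 * minor number, the high bits the major number.
 */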
#define FMINORBITS	20
#define FMINORMASK	((1U << FMINORBITS) - 1)
#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))

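/*
 * Queue a synthetic file open/close action in the replay log; the
 * io_piece carries no data, only the file action.
 */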
static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}

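/*
 * Map the trace device to a fio file index, adding the device as a new
 * file (plus an open event) the first time a major/minor pair is seen.
 */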
static int trace_add_file(struct thread_data *td, __u32 device,
			  struct file_cache *cache)
{
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	char dev[256];
	unsigned int i;

	if (cache->maj == maj && cache->min == min)
		return cache->fileno;

	cache->maj = maj;
	cache->min = min;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i)
		if (f->major == maj && f->minor == min) {
			cache->fileno = f->fileno;
			return cache->fileno;
		}

	strcpy(dev, "/dev");
	if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
		int fileno;

		if (td->o.replay_redirect)
			dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
					" with: %s\n", maj, min,
					td->o.replay_redirect);
		else
			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);

		dprint(FD_BLKTRACE, "add devices %s\n", dev);
		fileno = add_file_exclusive(td, dev);
		td->o.open_files++;
		td->files[fileno]->major = maj;
		td->files[fileno]->minor = min;
		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
		cache->fileno = fileno;
	}

	return cache->fileno;
}

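/*
 * Round the transfer size up to the next replay_align boundary. The mask
 * arithmetic assumes replay_align is a power of two.
 */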
static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
{
	if (!o->replay_align)
		return;

	t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
}

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->offset = offset * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
	       ipo->ddir, ipo->offset,
	       ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}

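/*
 * Notify traces carry no replayable I/O; log them for debugging and
 * return false so nothing gets queued.
 */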
static bool handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
		       t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
		       t->action, t->pid);
		break;
	case BLK_TN_MESSAGE:
		break;
	default:
		dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
		break;
	}
	return false;
}

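/*
 * Turn a discard trace into a DDIR_TRIM io_piece, tracking the per-direction
 * io count and the largest block size seen.
 */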
static bool handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned long long *bs,
				 struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_TRIM))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);
	fileno = trace_add_file(td, t->device, cache);

	ios[DDIR_TRIM]++;
	if (t->bytes > bs[DDIR_TRIM])
		bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	INIT_FLIST_HEAD(&ipo->list);

	ipo->offset = t->sector * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
	       ipo->offset, ipo->len,
	       ipo->delay);
	queue_io_piece(td, ipo);
	return true;
}

static void dump_trace(struct blk_io_trace *t)
{
	log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action);
}

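/*
 * Handle a regular read/write trace: honor replay_skip, warn (once) about
 * zero-byte traces, and store an io_piece for replay.
 */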
static bool handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
			    unsigned long long *bs, struct file_cache *cache)
{
	int rw;
	int fileno;

	fileno = trace_add_file(td, t->device, cache);

	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (rw) {
		if (td->o.replay_skip & (1u << DDIR_WRITE))
			return false;
	} else {
		if (td->o.replay_skip & (1u << DDIR_READ))
			return false;
	}

	if (!t->bytes) {
		if (!fio_did_warn(FIO_WARN_BTRACE_ZERO))
			dump_trace(t);
		return false;
	}

	if (t->bytes > bs[rw])
		bs[rw] = t->bytes;

	ios[rw]++;
	td->o.size += t->bytes;
	store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
	return true;
}

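/*
 * Turn a flush trace into a DDIR_SYNC io_piece and flag the thread as
 * issuing syncs.
 */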
static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
			       unsigned long long ttime, unsigned long *ios,
			       struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_SYNC))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);
	fileno = trace_add_file(td, t->device, cache);

	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_SYNC;
	ipo->fileno = fileno;

	ios[DDIR_SYNC]++;
	dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay);

	if (!(td->flags & TD_F_SYNCS))
		td->flags |= TD_F_SYNCS;

	queue_io_piece(td, ipo);
	return true;
}

/*
 * We only care for queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static bool queue_trace(struct thread_data *td, struct blk_io_trace *t,
			unsigned long *ios, unsigned long long *bs,
			struct file_cache *cache)
{
	unsigned long long *last_ttime = &td->io_log_last_ttime;
	unsigned long long delay = 0;

	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return false;

	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
		delay = delay_since_ttime(td, t->time);
		*last_ttime = t->time;
	}

	t_bytes_align(&td->o, t);

	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		return handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return handle_trace_discard(td, t, delay, ios, bs, cache);
	else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH))
		return handle_trace_flush(td, t, delay, ios, cache);
	else
		return handle_trace_fs(td, t, delay, ios, bs, cache);
}

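/*
 * Swap every field of a trace that was recorded with the opposite
 * endianness, as detected by is_blktrace().
 */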
static void byteswap_trace(struct blk_io_trace *t)
{
	t->magic = fio_swap32(t->magic);
	t->sequence = fio_swap32(t->sequence);
	t->time = fio_swap64(t->time);
	t->sector = fio_swap64(t->sector);
	t->bytes = fio_swap32(t->bytes);
	t->action = fio_swap32(t->action);
	t->pid = fio_swap32(t->pid);
	t->device = fio_swap32(t->device);
	t->cpu = fio_swap32(t->cpu);
	t->error = fio_swap16(t->error);
	t->pdu_len = fio_swap16(t->pdu_len);
}

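/*
 * True for write and discard traces; used to honor the read-only option
 * when scanning a trace file.
 */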
static bool t_is_write(struct blk_io_trace *t)
{
	return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
}

static enum fio_ddir t_get_ddir(struct blk_io_trace *t)
{
	if (t->action & BLK_TC_ACT(BLK_TC_READ))
		return DDIR_READ;
	else if (t->action & BLK_TC_ACT(BLK_TC_WRITE))
		return DDIR_WRITE;
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return DDIR_TRIM;

	return DDIR_INVAL;
}

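/*
 * Queue depth is estimated per data direction: queue events increment the
 * running depth, merges decrement it, and completions record the
 * high-water mark.
 */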
static void depth_inc(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]++;
}

static void depth_dec(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]--;
}

static void depth_end(struct blk_io_trace *t, int *this_depth, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL) {
		depth[ddir] = max(depth[ddir], this_depth[ddir]);
		this_depth[ddir] = 0;
	}
}

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
bool init_blktrace_read(struct thread_data *td, const char *filename, int need_swap)
{
	int old_state;

	td->io_log_rfile = fopen(filename, "rb");
	if (!td->io_log_rfile) {
		td_verror(td, errno, "open blktrace file");
		goto err;
	}
	td->io_log_blktrace_swap = need_swap;
	td->io_log_last_ttime = 0;
	td->o.size = 0;

	free_release_files(td);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	if (!read_blktrace(td))
		goto err;

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return false;
	}

	return true;

err:
	if (td->io_log_rfile) {
		fclose(td->io_log_rfile);
		td->io_log_rfile = NULL;
	}
	return false;
}

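/*
 * Read traces from td->io_log_rfile and queue them as io_pieces. With
 * read_iolog_chunked set, stop after the next batch of entries; otherwise
 * consume the whole file and derive the data direction, maximum block
 * sizes and iodepth from what was seen.
 */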
bool read_blktrace(struct thread_data *td)
{
	struct blk_io_trace t;
	struct file_cache cache = {
		.maj = ~0U,
		.min = ~0U,
	};
	unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { };
	unsigned long long rw_bs[DDIR_RWDIR_CNT] = { };
	unsigned long skipped_writes;
	FILE *f = td->io_log_rfile;
	int i, max_depth;
	struct fio_file *fiof;
	int this_depth[DDIR_RWDIR_CNT] = { };
	int depth[DDIR_RWDIR_CNT] = { };
	int64_t items_to_fetch = 0;

	if (td->o.read_iolog_chunked) {
		items_to_fetch = iolog_items_to_fetch(td);
		if (!items_to_fetch)
			return true;
	}

	skipped_writes = 0;
	do {
		int ret = fread(&t, 1, sizeof(t), f);

		if (ferror(f)) {
			td_verror(td, errno, "read blktrace file");
			goto err;
		} else if (feof(f)) {
			break;
		} else if (ret < (int) sizeof(t)) {
			log_err("fio: iolog short read\n");
			break;
		}

		if (td->io_log_blktrace_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
				t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
				t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(f, &t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				 ((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		if (!queue_trace(td, &t, ios, rw_bs, &cache))
			continue;

		if (td->o.read_iolog_chunked) {
			td->io_log_current++;
			items_to_fetch--;
			if (items_to_fetch == 0)
				break;
		}
	} while (1);

	if (td->o.read_iolog_chunked) {
		td->io_log_highmark = td->io_log_current;
		td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
		fio_gettime(&td->io_log_highmark_time, NULL);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
			td->o.name, skipped_writes);

	if (td->o.read_iolog_chunked) {
		if (td->io_log_current == 0)
			return false;
		td->o.td_ddir = TD_DDIR_RW;
		if ((rw_bs[DDIR_READ] > td->o.max_bs[DDIR_READ] ||
		     rw_bs[DDIR_WRITE] > td->o.max_bs[DDIR_WRITE] ||
		     rw_bs[DDIR_TRIM] > td->o.max_bs[DDIR_TRIM]) &&
		    td->orig_buffer) {
			td->o.max_bs[DDIR_READ] = max(td->o.max_bs[DDIR_READ], rw_bs[DDIR_READ]);
			td->o.max_bs[DDIR_WRITE] = max(td->o.max_bs[DDIR_WRITE], rw_bs[DDIR_WRITE]);
			td->o.max_bs[DDIR_TRIM] = max(td->o.max_bs[DDIR_TRIM], rw_bs[DDIR_TRIM]);
			io_u_quiesce(td);
			free_io_mem(td);
			if (init_io_u_buffers(td))
				return false;
		}
		return true;
	}

	for_each_file(td, fiof, i)
		trace_add_open_close_event(td, fiof->fileno, FIO_LOG_CLOSE_FILE);

	fclose(td->io_log_rfile);
	td->io_log_rfile = NULL;

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	max_depth = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE] && !ios[DDIR_TRIM] &&
	    !ios[DDIR_SYNC]) {
		log_err("fio: found no ios in blktrace data\n");
		return false;
	}

	td->o.td_ddir = 0;
	if (ios[DDIR_READ]) {
		td->o.td_ddir |= TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	}
	if (ios[DDIR_WRITE]) {
		td->o.td_ddir |= TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	}
	if (ios[DDIR_TRIM]) {
		td->o.td_ddir |= TD_DDIR_TRIM;
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return true;
err:
	fclose(f);
	/* clear the handle so the caller's error path doesn't fclose() twice */
	td->io_log_rfile = NULL;
	return false;
}

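/*
 * Fill one per-cursor merge parameter (at offset 'off' inside struct
 * blktrace_cursor) from the option list, falling back to 'def'. Returns 0
 * on success, or the option list length if it doesn't match nr_logs.
 */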
static int init_merge_param_list(fio_fp64_t *vals, struct blktrace_cursor *bcs,
				 int nr_logs, int def, size_t off)
{
	int i = 0, len = 0;

	while (len < FIO_IO_U_LIST_MAX_LEN && vals[len].u.f != 0.0)
		len++;

	if (len && len != nr_logs)
		return len;

	for (i = 0; i < nr_logs; i++) {
		int *val = (int *)((char *)&bcs[i] + off);
		*val = def;
		if (len)
			*val = (int)vals[i].u.f;
	}

	return 0;
}

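/* Return the index of the cursor holding the oldest pending trace. */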
static int find_earliest_io(struct blktrace_cursor *bcs, int nr_logs)
{
	__u64 time = ~(__u64)0;
	int idx = 0, i;

	for (i = 0; i < nr_logs; i++) {
		if (bcs[i].t.time < time) {
			time = bcs[i].t.time;
			idx = i;
		}
	}

	return idx;
}

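/*
 * A cursor hit end-of-file: rewind it if more iterations remain, otherwise
 * close it and compact the active cursor array.
 */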
static void merge_finish_file(struct blktrace_cursor *bcs, int i, int *nr_logs)
{
	bcs[i].iter++;
	if (bcs[i].iter < bcs[i].nr_iter) {
		fseek(bcs[i].f, 0, SEEK_SET);
		return;
	}

	*nr_logs -= 1;

	/* close file */
	fclose(bcs[i].f);

	/* keep active files contiguous */
	memmove(&bcs[i], &bcs[*nr_logs], sizeof(bcs[i]));
}

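/*
 * Read the next queue trace into the cursor, skipping actions fio does not
 * replay, and offset its timestamp by the iteration count and time scalar.
 */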
static int read_trace(struct thread_data *td, struct blktrace_cursor *bc)
{
	int ret = 0;
	struct blk_io_trace *t = &bc->t;

read_skip:
	/* read an io trace into the cursor, not into the pointer itself */
	ret = fread(t, 1, sizeof(*t), bc->f);
	if (ferror(bc->f)) {
		td_verror(td, errno, "read blktrace file");
		return ret;
	} else if (feof(bc->f)) {
		if (!bc->length)
			bc->length = bc->t.time;
		return ret;
	} else if (ret < (int) sizeof(*t)) {
		log_err("fio: iolog short read\n");
		return -1;
	}

	if (bc->swap)
		byteswap_trace(t);

	/* skip over actions that fio does not care about */
	if ((t->action & 0xffff) != __BLK_TA_QUEUE ||
	    t_get_ddir(t) == DDIR_INVAL) {
		ret = discard_pdu(bc->f, t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			return ret;
		}
		goto read_skip;
	}

	t->time = (t->time + bc->iter * bc->length) * bc->scalar / 100;

	return ret;
}

static int write_trace(FILE *fp, struct blk_io_trace *t)
{
	/* pdu is not used so just write out only the io trace */
	t->pdu_len = 0;
	return fwrite((void *)t, sizeof(*t), 1, fp);
}

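/*
 * Merge multiple blktrace files into td->o.merge_blktrace_file by
 * repeatedly emitting the earliest pending trace across all cursors, then
 * point iolog replay at the merged result.
 */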
int merge_blktrace_iologs(struct thread_data *td)
{
	int nr_logs = get_max_str_idx(td->o.read_iolog_file);
	struct blktrace_cursor *bcs = malloc(sizeof(struct blktrace_cursor) *
					     nr_logs);
	struct blktrace_cursor *bc;
	FILE *merge_fp;
	char *str, *ptr, *name, *merge_buf;
	int i, ret;

	ret = init_merge_param_list(td->o.merge_blktrace_scalars, bcs, nr_logs,
				    100, offsetof(struct blktrace_cursor,
						  scalar));
	if (ret) {
		log_err("fio: merge_blktrace_scalars(%d) != nr_logs(%d)\n",
			ret, nr_logs);
		goto err_param;
	}

	ret = init_merge_param_list(td->o.merge_blktrace_iters, bcs, nr_logs,
				    1, offsetof(struct blktrace_cursor,
						nr_iter));
	if (ret) {
		log_err("fio: merge_blktrace_iters(%d) != nr_logs(%d)\n",
			ret, nr_logs);
		goto err_param;
	}

	/* setup output file */
	merge_fp = fopen(td->o.merge_blktrace_file, "w");
	if (!merge_fp) {
		ret = -errno;
		log_err("fio: could not open merge output file: %s\n",
			td->o.merge_blktrace_file);
		goto err_param;
	}
	merge_buf = malloc(128 * 1024);
	if (!merge_buf) {
		ret = -ENOMEM;
		goto err_out_file;
	}
	ret = setvbuf(merge_fp, merge_buf, _IOFBF, 128 * 1024);
	if (ret)
		goto err_merge_buf;

	/* setup input files */
	str = ptr = strdup(td->o.read_iolog_file);
	nr_logs = 0;
	for (i = 0; (name = get_next_str(&ptr)) != NULL; i++) {
		bcs[i].f = fopen(name, "rb");
		if (!bcs[i].f) {
			log_err("fio: could not open file: %s\n", name);
			ret = -errno;
			free(str);
			goto err_file;
		}
		nr_logs++;

		if (!is_blktrace(name, &bcs[i].swap)) {
			log_err("fio: file is not a blktrace: %s\n", name);
			ret = -EINVAL;
			free(str);
			goto err_file;
		}

		ret = read_trace(td, &bcs[i]);
		if (ret < 0) {
			free(str);
			goto err_file;
		} else if (!ret) {
			merge_finish_file(bcs, i, &nr_logs);
			i--;
		}
	}
	free(str);

	/* merge files */
	while (nr_logs) {
		i = find_earliest_io(bcs, nr_logs);
		bc = &bcs[i];
		/* skip over the pdu */
		ret = discard_pdu(bc->f, &bc->t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err_file;
		}

		ret = write_trace(merge_fp, &bc->t);
		ret = read_trace(td, bc);
		if (ret < 0)
			goto err_file;
		else if (!ret)
			merge_finish_file(bcs, i, &nr_logs);
	}

	/* set iolog file to read from the newly merged file */
	td->o.read_iolog_file = td->o.merge_blktrace_file;
	ret = 0;

err_file:
	/* cleanup */
	for (i = 0; i < nr_logs; i++) {
		fclose(bcs[i].f);
	}
err_merge_buf:
err_out_file:
	/* close the stream before releasing its setvbuf() buffer */
	fflush(merge_fp);
	fclose(merge_fp);
	free(merge_buf);
err_param:
	free(bcs);

	return ret;
}