blktrace.c: Use file stream interface instead of fifo
[fio.git] / blktrace.c
1 /*
2  * blktrace support code for fio
3  */
4 #include <stdio.h>
5 #include <stdlib.h>
6 #include <unistd.h>
7 #include <errno.h>
8
9 #include "flist.h"
10 #include "fio.h"
11 #include "blktrace.h"
12 #include "blktrace_api.h"
13 #include "oslib/linux-dev-lookup.h"
14
15 /*
16  * Just discard the pdu by seeking past it.
17  */
18 static int discard_pdu(FILE* f, struct blk_io_trace *t)
19 {
20         if (t->pdu_len == 0)
21                 return 0;
22
23         dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
24         if (fseek(f, t->pdu_len, SEEK_CUR) < 0)
25                 return -errno;
26
27         return t->pdu_len;
28 }
29
30 /*
31  * Check if this is a blktrace binary data file. We read a single trace
32  * into memory and check for the magic signature.
33  */
34 bool is_blktrace(const char *filename, int *need_swap)
35 {
36         struct blk_io_trace t;
37         int fd, ret;
38
39         fd = open(filename, O_RDONLY);
40         if (fd < 0)
41                 return false;
42
43         ret = read(fd, &t, sizeof(t));
44         close(fd);
45
46         if (ret < 0) {
47                 perror("read blktrace");
48                 return false;
49         } else if (ret != sizeof(t)) {
50                 log_err("fio: short read on blktrace file\n");
51                 return false;
52         }
53
54         if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
55                 *need_swap = 0;
56                 return true;
57         }
58
59         /*
60          * Maybe it needs to be endian swapped...
61          */
62         t.magic = fio_swap32(t.magic);
63         if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
64                 *need_swap = 1;
65                 return true;
66         }
67
68         return false;
69 }
70
/*
 * Decode the 32-bit device number carried in blk_io_trace->device:
 * the low FMINORBITS bits are the minor number, the rest the major.
 */
#define FMINORBITS      20
#define FMINORMASK      ((1U << FMINORBITS) - 1)
#define FMAJOR(dev)     ((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)     ((unsigned int) ((dev) & FMINORMASK))
75
76 static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
77 {
78         struct io_piece *ipo;
79
80         ipo = calloc(1, sizeof(*ipo));
81         init_ipo(ipo);
82
83         ipo->ddir = DDIR_INVAL;
84         ipo->fileno = fileno;
85         ipo->file_action = action;
86         flist_add_tail(&ipo->list, &td->io_log_list);
87 }
88
89 static int trace_add_file(struct thread_data *td, __u32 device)
90 {
91         static unsigned int last_maj, last_min, last_fileno;
92         unsigned int maj = FMAJOR(device);
93         unsigned int min = FMINOR(device);
94         struct fio_file *f;
95         char dev[256];
96         unsigned int i;
97
98         if (last_maj == maj && last_min == min)
99                 return last_fileno;
100
101         last_maj = maj;
102         last_min = min;
103
104         /*
105          * check for this file in our list
106          */
107         for_each_file(td, f, i)
108                 if (f->major == maj && f->minor == min) {
109                         last_fileno = f->fileno;
110                         return last_fileno;
111                 }
112
113         strcpy(dev, "/dev");
114         if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
115                 int fileno;
116
117                 if (td->o.replay_redirect)
118                         dprint(FD_BLKTRACE, "device lookup: %d/%d\n overridden"
119                                         " with: %s\n", maj, min,
120                                         td->o.replay_redirect);
121                 else
122                         dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);
123
124                 dprint(FD_BLKTRACE, "add devices %s\n", dev);
125                 fileno = add_file_exclusive(td, dev);
126                 td->o.open_files++;
127                 td->files[fileno]->major = maj;
128                 td->files[fileno]->minor = min;
129                 trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
130                 last_fileno = fileno;
131         }
132
133         return last_fileno;
134 }
135
136 static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
137 {
138         if (!o->replay_align)
139                 return;
140
141         t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
142 }
143
144 /*
145  * Store blk_io_trace data in an ipo for later retrieval.
146  */
147 static void store_ipo(struct thread_data *td, unsigned long long offset,
148                       unsigned int bytes, int rw, unsigned long long ttime,
149                       int fileno)
150 {
151         struct io_piece *ipo;
152
153         ipo = calloc(1, sizeof(*ipo));
154         init_ipo(ipo);
155
156         ipo->offset = offset * 512;
157         if (td->o.replay_scale)
158                 ipo->offset = ipo->offset / td->o.replay_scale;
159         ipo_bytes_align(td->o.replay_align, ipo);
160         ipo->len = bytes;
161         ipo->delay = ttime / 1000;
162         if (rw)
163                 ipo->ddir = DDIR_WRITE;
164         else
165                 ipo->ddir = DDIR_READ;
166         ipo->fileno = fileno;
167
168         dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
169                                                         ipo->ddir, ipo->offset,
170                                                         ipo->len, ipo->delay);
171         queue_io_piece(td, ipo);
172 }
173
174 static void handle_trace_notify(struct blk_io_trace *t)
175 {
176         switch (t->action) {
177         case BLK_TN_PROCESS:
178                 dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
179                                 t->action, t->pid);
180                 break;
181         case BLK_TN_TIMESTAMP:
182                 dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
183                                 t->action, t->pid);
184                 break;
185         case BLK_TN_MESSAGE:
186                 break;
187         default:
188                 dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
189                 break;
190         }
191 }
192
193 static void handle_trace_discard(struct thread_data *td,
194                                  struct blk_io_trace *t,
195                                  unsigned long long ttime,
196                                  unsigned long *ios, unsigned int *bs)
197 {
198         struct io_piece *ipo;
199         int fileno;
200
201         if (td->o.replay_skip & (1u << DDIR_TRIM))
202                 return;
203
204         ipo = calloc(1, sizeof(*ipo));
205         init_ipo(ipo);
206         fileno = trace_add_file(td, t->device);
207
208         ios[DDIR_TRIM]++;
209         if (t->bytes > bs[DDIR_TRIM])
210                 bs[DDIR_TRIM] = t->bytes;
211
212         td->o.size += t->bytes;
213
214         INIT_FLIST_HEAD(&ipo->list);
215
216         ipo->offset = t->sector * 512;
217         if (td->o.replay_scale)
218                 ipo->offset = ipo->offset / td->o.replay_scale;
219         ipo_bytes_align(td->o.replay_align, ipo);
220         ipo->len = t->bytes;
221         ipo->delay = ttime / 1000;
222         ipo->ddir = DDIR_TRIM;
223         ipo->fileno = fileno;
224
225         dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
226                                                         ipo->offset, ipo->len,
227                                                         ipo->delay);
228         queue_io_piece(td, ipo);
229 }
230
231 static void dump_trace(struct blk_io_trace *t)
232 {
233         log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action);
234 }
235
236 static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
237                             unsigned long long ttime, unsigned long *ios,
238                             unsigned int *bs)
239 {
240         int rw;
241         int fileno;
242
243         fileno = trace_add_file(td, t->device);
244
245         rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
246
247         if (rw) {
248                 if (td->o.replay_skip & (1u << DDIR_WRITE))
249                         return;
250         } else {
251                 if (td->o.replay_skip & (1u << DDIR_READ))
252                         return;
253         }
254
255         if (!t->bytes) {
256                 if (!fio_did_warn(FIO_WARN_BTRACE_ZERO))
257                         dump_trace(t);
258                 return;
259         }
260
261         if (t->bytes > bs[rw])
262                 bs[rw] = t->bytes;
263
264         ios[rw]++;
265         td->o.size += t->bytes;
266         store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
267 }
268
269 static void handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
270                                unsigned long long ttime, unsigned long *ios)
271 {
272         struct io_piece *ipo;
273         int fileno;
274
275         if (td->o.replay_skip & (1u << DDIR_SYNC))
276                 return;
277
278         ipo = calloc(1, sizeof(*ipo));
279         init_ipo(ipo);
280         fileno = trace_add_file(td, t->device);
281
282         ipo->delay = ttime / 1000;
283         ipo->ddir = DDIR_SYNC;
284         ipo->fileno = fileno;
285
286         ios[DDIR_SYNC]++;
287         dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay);
288         queue_io_piece(td, ipo);
289 }
290
/*
 * We only care for queue traces, most of the others are side effects
 * due to internal workings of the block layer.
 */
static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
			 unsigned long *ios, unsigned int *bs)
{
	/* timestamp of the previous io trace; static works because trace
	 * loading is sequential within one file */
	static unsigned long long last_ttime;
	unsigned long long delay = 0;

	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return;

	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
		/*
		 * Compute the inter-arrival delay, optionally stretched or
		 * compressed by replay_time_scale (a percentage; 100 = as-is).
		 */
		if (!last_ttime || td->o.no_stall)
			delay = 0;
		else if (td->o.replay_time_scale == 100)
			delay = t->time - last_ttime;
		else {
			double tmp = t->time - last_ttime;
			double scale;

			scale = (double) 100.0 / (double) td->o.replay_time_scale;
			tmp *= scale;
			delay = tmp;
		}
		last_ttime = t->time;
	}

	t_bytes_align(&td->o, t);

	/* dispatch by trace class; plain fs read/write is the fallback */
	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		handle_trace_discard(td, t, delay, ios, bs);
	else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH))
		handle_trace_flush(td, t, delay, ios);
	else
		handle_trace_fs(td, t, delay, ios, bs);
}
331
332 static void byteswap_trace(struct blk_io_trace *t)
333 {
334         t->magic = fio_swap32(t->magic);
335         t->sequence = fio_swap32(t->sequence);
336         t->time = fio_swap64(t->time);
337         t->sector = fio_swap64(t->sector);
338         t->bytes = fio_swap32(t->bytes);
339         t->action = fio_swap32(t->action);
340         t->pid = fio_swap32(t->pid);
341         t->device = fio_swap32(t->device);
342         t->cpu = fio_swap32(t->cpu);
343         t->error = fio_swap16(t->error);
344         t->pdu_len = fio_swap16(t->pdu_len);
345 }
346
347 static bool t_is_write(struct blk_io_trace *t)
348 {
349         return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
350 }
351
352 static enum fio_ddir t_get_ddir(struct blk_io_trace *t)
353 {
354         if (t->action & BLK_TC_ACT(BLK_TC_READ))
355                 return DDIR_READ;
356         else if (t->action & BLK_TC_ACT(BLK_TC_WRITE))
357                 return DDIR_WRITE;
358         else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
359                 return DDIR_TRIM;
360
361         return DDIR_INVAL;
362 }
363
364 static void depth_inc(struct blk_io_trace *t, int *depth)
365 {
366         enum fio_ddir ddir;
367
368         ddir = t_get_ddir(t);
369         if (ddir != DDIR_INVAL)
370                 depth[ddir]++;
371 }
372
373 static void depth_dec(struct blk_io_trace *t, int *depth)
374 {
375         enum fio_ddir ddir;
376
377         ddir = t_get_ddir(t);
378         if (ddir != DDIR_INVAL)
379                 depth[ddir]--;
380 }
381
382 static void depth_end(struct blk_io_trace *t, int *this_depth, int *depth)
383 {
384         enum fio_ddir ddir = DDIR_INVAL;
385
386         ddir = t_get_ddir(t);
387         if (ddir != DDIR_INVAL) {
388                 depth[ddir] = max(depth[ddir], this_depth[ddir]);
389                 this_depth[ddir] = 0;
390         }
391 }
392
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
bool load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	/* per-direction io counts; SYNC is counted but has no block size */
	unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { };
	unsigned int rw_bs[DDIR_RWDIR_CNT] = { };
	unsigned long skipped_writes;
	FILE *f;
	int i, old_state, max_depth;
	struct fio_file *fiof;
	/* queue depth probing: running in-flight count and observed peak */
	int this_depth[DDIR_RWDIR_CNT] = { };
	int depth[DDIR_RWDIR_CNT] = { };

	f = fopen(filename, "rb");
	if (!f) {
		td_verror(td, errno, "open blktrace file");
		return false;
	}

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;
	skipped_writes = 0;
	do {
		/* each record is a fixed-size trace plus an optional pdu */
		int ret = fread(&t, 1, sizeof(t), f);

		if (ferror(f)) {
			td_verror(td, errno, "read blktrace file");
			goto err;
		} else if (feof(f)) {
			break;
		} else if (ret < (int) sizeof(t)) {
			log_err("fio: iolog short read\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		/* payload is never replayed, just seek past it */
		ret = discard_pdu(f, &t);
		if (ret < 0) {
			/* message predates the fd-to-stream conversion */
			td_verror(td, -ret, "blktrace lseek");
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			/* probe depth from queue/merge/complete transitions */
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);

	for_each_file(td, fiof, i)
		trace_add_open_close_event(td, fiof->fileno, FIO_LOG_CLOSE_FILE);

	fclose(f);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return false;
	}

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	max_depth = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE] && !ios[DDIR_TRIM] &&
	    !ios[DDIR_SYNC]) {
		log_err("fio: found no ios in blktrace data\n");
		return false;
	}

	/* enable only the directions actually seen, sized to the largest io */
	td->o.td_ddir = 0;
	if (ios[DDIR_READ]) {
		td->o.td_ddir |= TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	}
	if (ios[DDIR_WRITE]) {
		td->o.td_ddir |= TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	}
	if (ios[DDIR_TRIM]) {
		td->o.td_ddir |= TD_DDIR_TRIM;
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way. But only do so if the minimum block size
	 * is a multiple of 4k, otherwise we don't know if it's safe to do so.
	 */
	if (!fio_option_is_set(&td->o, odirect) && !(td_min_bs(td) & 4095))
		td->o.odirect = 1;

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return true;
err:
	fclose(f);
	return false;
}
536
537 static int init_merge_param_list(fio_fp64_t *vals, struct blktrace_cursor *bcs,
538                                  int nr_logs, int def, size_t off)
539 {
540         int i = 0, len = 0;
541
542         while (len < FIO_IO_U_LIST_MAX_LEN && vals[len].u.f != 0.0)
543                 len++;
544
545         if (len && len != nr_logs)
546                 return len;
547
548         for (i = 0; i < nr_logs; i++) {
549                 int *val = (int *)((char *)&bcs[i] + off);
550                 *val = def;
551                 if (len)
552                         *val = (int)vals[i].u.f;
553         }
554
555         return 0;
556
557 }
558
559 static int find_earliest_io(struct blktrace_cursor *bcs, int nr_logs)
560 {
561         __u64 time = ~(__u64)0;
562         int idx = 0, i;
563
564         for (i = 0; i < nr_logs; i++) {
565                 if (bcs[i].t.time < time) {
566                         time = bcs[i].t.time;
567                         idx = i;
568                 }
569         }
570
571         return idx;
572 }
573
574 static void merge_finish_file(struct blktrace_cursor *bcs, int i, int *nr_logs)
575 {
576         bcs[i].iter++;
577         if (bcs[i].iter < bcs[i].nr_iter) {
578                 fseek(bcs[i].f, 0, SEEK_SET);
579                 return;
580         }
581
582         *nr_logs -= 1;
583
584         /* close file */
585         fclose(bcs[i].f);
586
587         /* keep active files contiguous */
588         memmove(&bcs[i], &bcs[*nr_logs], sizeof(bcs[i]));
589 }
590
591 static int read_trace(struct thread_data *td, struct blktrace_cursor *bc)
592 {
593         int ret = 0;
594         struct blk_io_trace *t = &bc->t;
595
596 read_skip:
597         /* read an io trace */
598         ret = fread(&t, 1, sizeof(t), bc->f);
599         if (ferror(bc->f)) {
600                 td_verror(td, errno, "read blktrace file");
601                 return ret;
602         } else if (feof(bc->f)) {
603                 if (!bc->length)
604                         bc->length = bc->t.time;
605                 return ret;
606         } else if (ret < (int) sizeof(*t)) {
607                 log_err("fio: iolog short read\n");
608                 return -1;
609         }
610
611         if (bc->swap)
612                 byteswap_trace(t);
613
614         /* skip over actions that fio does not care about */
615         if ((t->action & 0xffff) != __BLK_TA_QUEUE ||
616             t_get_ddir(t) == DDIR_INVAL) {
617                 ret = discard_pdu(bc->f, t);
618                 if (ret < 0) {
619                         td_verror(td, -ret, "blktrace lseek");
620                         return ret;
621                 }
622                 goto read_skip;
623         }
624
625         t->time = (t->time + bc->iter * bc->length) * bc->scalar / 100;
626
627         return ret;
628 }
629
630 static int write_trace(FILE *fp, struct blk_io_trace *t)
631 {
632         /* pdu is not used so just write out only the io trace */
633         t->pdu_len = 0;
634         return fwrite((void *)t, sizeof(*t), 1, fp);
635 }
636
637 int merge_blktrace_iologs(struct thread_data *td)
638 {
639         int nr_logs = get_max_str_idx(td->o.read_iolog_file);
640         struct blktrace_cursor *bcs = malloc(sizeof(struct blktrace_cursor) *
641                                              nr_logs);
642         struct blktrace_cursor *bc;
643         FILE *merge_fp;
644         char *str, *ptr, *name, *merge_buf;
645         int i, ret;
646
647         ret = init_merge_param_list(td->o.merge_blktrace_scalars, bcs, nr_logs,
648                                     100, offsetof(struct blktrace_cursor,
649                                                   scalar));
650         if (ret) {
651                 log_err("fio: merge_blktrace_scalars(%d) != nr_logs(%d)\n",
652                         ret, nr_logs);
653                 goto err_param;
654         }
655
656         ret = init_merge_param_list(td->o.merge_blktrace_iters, bcs, nr_logs,
657                                     1, offsetof(struct blktrace_cursor,
658                                                 nr_iter));
659         if (ret) {
660                 log_err("fio: merge_blktrace_iters(%d) != nr_logs(%d)\n",
661                         ret, nr_logs);
662                 goto err_param;
663         }
664
665         /* setup output file */
666         merge_fp = fopen(td->o.merge_blktrace_file, "w");
667         merge_buf = malloc(128 * 1024);
668         if (!merge_buf)
669                 goto err_out_file;
670         ret = setvbuf(merge_fp, merge_buf, _IOFBF, 128 * 1024);
671         if (ret)
672                 goto err_merge_buf;
673
674         /* setup input files */
675         str = ptr = strdup(td->o.read_iolog_file);
676         nr_logs = 0;
677         for (i = 0; (name = get_next_str(&ptr)) != NULL; i++) {
678                 bcs[i].f = fopen(name, "rb");
679                 if (!bcs[i].f) {
680                         log_err("fio: could not open file: %s\n", name);
681                         ret = -errno;
682                         free(str);
683                         goto err_file;
684                 }
685                 nr_logs++;
686
687                 if (!is_blktrace(name, &bcs[i].swap)) {
688                         log_err("fio: file is not a blktrace: %s\n", name);
689                         free(str);
690                         goto err_file;
691                 }
692
693                 ret = read_trace(td, &bcs[i]);
694                 if (ret < 0) {
695                         free(str);
696                         goto err_file;
697                 } else if (!ret) {
698                         merge_finish_file(bcs, i, &nr_logs);
699                         i--;
700                 }
701         }
702         free(str);
703
704         /* merge files */
705         while (nr_logs) {
706                 i = find_earliest_io(bcs, nr_logs);
707                 bc = &bcs[i];
708                 /* skip over the pdu */
709                 ret = discard_pdu(bc->f, &bc->t);
710                 if (ret < 0) {
711                         td_verror(td, -ret, "blktrace lseek");
712                         goto err_file;
713                 }
714
715                 ret = write_trace(merge_fp, &bc->t);
716                 ret = read_trace(td, bc);
717                 if (ret < 0)
718                         goto err_file;
719                 else if (!ret)
720                         merge_finish_file(bcs, i, &nr_logs);
721         }
722
723         /* set iolog file to read from the newly merged file */
724         td->o.read_iolog_file = td->o.merge_blktrace_file;
725         ret = 0;
726
727 err_file:
728         /* cleanup */
729         for (i = 0; i < nr_logs; i++) {
730                 fclose(bcs[i].f);
731         }
732 err_merge_buf:
733         free(merge_buf);
734 err_out_file:
735         fflush(merge_fp);
736         fclose(merge_fp);
737 err_param:
738         free(bcs);
739
740         return ret;
741 }