[fio.git] / blktrace.c
1 /*
2  * blktrace support code for fio
3  */
4 #include <stdio.h>
5 #include <stdlib.h>
6 #include <unistd.h>
7 #include <errno.h>
8
9 #include "flist.h"
10 #include "fio.h"
11 #include "iolog.h"
12 #include "blktrace.h"
13 #include "blktrace_api.h"
14 #include "oslib/linux-dev-lookup.h"
15
16 struct file_cache {
17         unsigned int maj;
18         unsigned int min;
19         unsigned int fileno;
20 };
21
22 /*
23  * Just discard the pdu by seeking past it.
24  */
25 static int discard_pdu(FILE* f, struct blk_io_trace *t)
26 {
27         if (t->pdu_len == 0)
28                 return 0;
29
30         dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
31         if (fseek(f, t->pdu_len, SEEK_CUR) < 0)
32                 return -errno;
33
34         return t->pdu_len;
35 }
36
37 /*
38  * Check if this is a blktrace binary data file. We read a single trace
39  * into memory and check for the magic signature.
40  */
41 bool is_blktrace(const char *filename, int *need_swap)
42 {
43         struct blk_io_trace t;
44         int fd, ret;
45
46         fd = open(filename, O_RDONLY);
47         if (fd < 0)
48                 return false;
49
50         ret = read(fd, &t, sizeof(t));
51         close(fd);
52
53         if (ret < 0) {
54                 perror("read blktrace");
55                 return false;
56         } else if (ret != sizeof(t)) {
57                 log_err("fio: short read on blktrace file\n");
58                 return false;
59         }
60
61         if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
62                 *need_swap = 0;
63                 return true;
64         }
65
66         /*
67          * Maybe it needs to be endian swapped...
68          */
69         t.magic = fio_swap32(t.magic);
70         if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
71                 *need_swap = 1;
72                 return true;
73         }
74
75         return false;
76 }
77
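/*
 * Illustrative example (not part of the original source): the magic word
 * packs the trace format version into its low byte, so a native-endian
 * trace normally begins with 0x65617407, i.e. BLK_IO_TRACE_MAGIC with
 * BLK_IO_TRACE_VERSION in the bottom eight bits.  The hypothetical helper
 * below merely restates the masked checks done in is_blktrace() and
 * read_blktrace().
 */
static inline bool example_blktrace_magic_ok(__u32 magic)
{
        return (magic & 0xffffff00) == BLK_IO_TRACE_MAGIC &&
               (magic & 0xff) == BLK_IO_TRACE_VERSION;
}
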
78 #define FMINORBITS      20
79 #define FMINORMASK      ((1U << FMINORBITS) - 1)
80 #define FMAJOR(dev)     ((unsigned int) ((dev) >> FMINORBITS))
81 #define FMINOR(dev)     ((unsigned int) ((dev) & FMINORMASK))
82
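/*
 * Worked example (not part of the original source): blktrace packs the
 * device number as major << FMINORBITS | minor, so a trace taken against
 * device 8/16 carries device == (8 << 20) | 16, which the macros above
 * decode back to FMAJOR() == 8 and FMINOR() == 16.  The helper below is
 * hypothetical and exists only to demonstrate that decode.
 */
static inline void example_decode_device(__u32 device, unsigned int *maj,
                                         unsigned int *min)
{
        *maj = FMAJOR(device);          /* (8 << 20) | 16 -> 8 */
        *min = FMINOR(device);          /* (8 << 20) | 16 -> 16 */
}
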
83 static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
84 {
85         struct io_piece *ipo;
86
87         ipo = calloc(1, sizeof(*ipo));
88         init_ipo(ipo);
89
90         ipo->ddir = DDIR_INVAL;
91         ipo->fileno = fileno;
92         ipo->file_action = action;
93         flist_add_tail(&ipo->list, &td->io_log_list);
94 }
95
96 static int trace_add_file(struct thread_data *td, __u32 device,
97                           struct file_cache *cache)
98 {
99         unsigned int maj = FMAJOR(device);
100         unsigned int min = FMINOR(device);
101         struct fio_file *f;
102         char dev[256];
103         unsigned int i;
104
105         if (cache->maj == maj && cache->min == min)
106                 return cache->fileno;
107
108         cache->maj = maj;
109         cache->min = min;
110
111         /*
112          * check for this file in our list
113          */
114         for_each_file(td, f, i)
115                 if (f->major == maj && f->minor == min) {
116                         cache->fileno = f->fileno;
117                         return cache->fileno;
118                 }
119
120         strcpy(dev, "/dev");
121         if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
122                 int fileno;
123
124                 if (td->o.replay_redirect)
125                         dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
126                                         " with: %s\n", maj, min,
127                                         td->o.replay_redirect);
128                 else
129                         dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);
130
131                 dprint(FD_BLKTRACE, "add device %s\n", dev);
132                 fileno = add_file_exclusive(td, dev);
133                 td->o.open_files++;
134                 td->files[fileno]->major = maj;
135                 td->files[fileno]->minor = min;
136                 trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
137                 cache->fileno = fileno;
138         }
139
140         return cache->fileno;
141 }
142
143 static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
144 {
145         if (!o->replay_align)
146                 return;
147
148         t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
149 }
150
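/*
 * Worked example (not part of the original source): the mask arithmetic in
 * t_bytes_align() assumes replay_align is a power of two.  With
 * replay_align=4096 and t->bytes=1536, (1536 + 4095) & ~4095 == 4096, so
 * the replayed request size is rounded up to the next aligned multiple.
 */
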
151 /*
152  * Store blk_io_trace data in an ipo for later retrieval.
153  */
154 static void store_ipo(struct thread_data *td, unsigned long long offset,
155                       unsigned int bytes, int rw, unsigned long long ttime,
156                       int fileno)
157 {
158         struct io_piece *ipo;
159
160         ipo = calloc(1, sizeof(*ipo));
161         init_ipo(ipo);
162
163         ipo->offset = offset * 512;
164         if (td->o.replay_scale)
165                 ipo->offset = ipo->offset / td->o.replay_scale;
166         ipo_bytes_align(td->o.replay_align, ipo);
167         ipo->len = bytes;
168         ipo->delay = ttime / 1000;
169         if (rw)
170                 ipo->ddir = DDIR_WRITE;
171         else
172                 ipo->ddir = DDIR_READ;
173         ipo->fileno = fileno;
174
175         dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
176                                                         ipo->ddir, ipo->offset,
177                                                         ipo->len, ipo->delay);
178         queue_io_piece(td, ipo);
179 }
180
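/*
 * Worked example (not part of the original source): the trace sector is in
 * 512-byte units and the computed inter-trace delay is in nanoseconds, so a
 * trace with sector=2048 and ttime=1500000 is stored above as a byte offset
 * of 2048 * 512 = 1048576 with a replay delay of 1500000 / 1000 = 1500 usec,
 * before any replay_scale or replay_align adjustment.
 */
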
181 static bool handle_trace_notify(struct blk_io_trace *t)
182 {
183         switch (t->action) {
184         case BLK_TN_PROCESS:
185                 dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
186                                 t->action, t->pid);
187                 break;
188         case BLK_TN_TIMESTAMP:
189                 dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
190                                 t->action, t->pid);
191                 break;
192         case BLK_TN_MESSAGE:
193                 break;
194         default:
195                 dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
196                 break;
197         }
198         return false;
199 }
200
201 static bool handle_trace_discard(struct thread_data *td,
202                                  struct blk_io_trace *t,
203                                  unsigned long long ttime,
204                                  unsigned long *ios, unsigned long long *bs,
205                                  struct file_cache *cache)
206 {
207         struct io_piece *ipo;
208         int fileno;
209
210         if (td->o.replay_skip & (1u << DDIR_TRIM))
211                 return false;
212
213         ipo = calloc(1, sizeof(*ipo));
214         init_ipo(ipo);
215         fileno = trace_add_file(td, t->device, cache);
216
217         ios[DDIR_TRIM]++;
218         if (t->bytes > bs[DDIR_TRIM])
219                 bs[DDIR_TRIM] = t->bytes;
220
221         td->o.size += t->bytes;
222
223         INIT_FLIST_HEAD(&ipo->list);
224
225         ipo->offset = t->sector * 512;
226         if (td->o.replay_scale)
227                 ipo->offset = ipo->offset / td->o.replay_scale;
228         ipo_bytes_align(td->o.replay_align, ipo);
229         ipo->len = t->bytes;
230         ipo->delay = ttime / 1000;
231         ipo->ddir = DDIR_TRIM;
232         ipo->fileno = fileno;
233
234         dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
235                                                         ipo->offset, ipo->len,
236                                                         ipo->delay);
237         queue_io_piece(td, ipo);
238         return true;
239 }
240
241 static void dump_trace(struct blk_io_trace *t)
242 {
243         log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action);
244 }
245
246 static bool handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
247                             unsigned long long ttime, unsigned long *ios,
248                             unsigned long long *bs, struct file_cache *cache)
249 {
250         int rw;
251         int fileno;
252
253         fileno = trace_add_file(td, t->device, cache);
254
255         rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
256
257         if (rw) {
258                 if (td->o.replay_skip & (1u << DDIR_WRITE))
259                         return false;
260         } else {
261                 if (td->o.replay_skip & (1u << DDIR_READ))
262                         return false;
263         }
264
265         if (!t->bytes) {
266                 if (!fio_did_warn(FIO_WARN_BTRACE_ZERO))
267                         dump_trace(t);
268                 return false;
269         }
270
271         if (t->bytes > bs[rw])
272                 bs[rw] = t->bytes;
273
274         ios[rw]++;
275         td->o.size += t->bytes;
276         store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
277         return true;
278 }
279
280 static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
281                                unsigned long long ttime, unsigned long *ios,
282                                struct file_cache *cache)
283 {
284         struct io_piece *ipo;
285         int fileno;
286
287         if (td->o.replay_skip & (1u << DDIR_SYNC))
288                 return false;
289
290         ipo = calloc(1, sizeof(*ipo));
291         init_ipo(ipo);
292         fileno = trace_add_file(td, t->device, cache);
293
294         ipo->delay = ttime / 1000;
295         ipo->ddir = DDIR_SYNC;
296         ipo->fileno = fileno;
297
298         ios[DDIR_SYNC]++;
299         dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay);
300
301         if (!(td->flags & TD_F_SYNCS))
302                 td->flags |= TD_F_SYNCS;
303
304         queue_io_piece(td, ipo);
305         return true;
306 }
307
308 /*
309  * We only care about queue traces; most of the others are side effects
310  * of the internal workings of the block layer.
311  */
312 static bool queue_trace(struct thread_data *td, struct blk_io_trace *t,
313                          unsigned long *ios, unsigned long long *bs,
314                          struct file_cache *cache)
315 {
316         unsigned long long *last_ttime = &td->io_log_last_ttime;
317         unsigned long long delay = 0;
318
319         if ((t->action & 0xffff) != __BLK_TA_QUEUE)
320                 return false;
321
322         if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
323                 delay = delay_since_ttime(td, t->time);
324                 *last_ttime = t->time;
325         }
326
327         t_bytes_align(&td->o, t);
328
329         if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
330                 return handle_trace_notify(t);
331         else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
332                 return handle_trace_discard(td, t, delay, ios, bs, cache);
333         else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH))
334                 return handle_trace_flush(td, t, delay, ios, cache);
335         else
336                 return handle_trace_fs(td, t, delay, ios, bs, cache);
337 }
338
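/*
 * Illustrative note (not part of the original source): a blk_io_trace
 * action word keeps the trace category bits in its upper half and the
 * action code in its lower 16 bits.  A queued write, for example, carries
 * BLK_TC_ACT(BLK_TC_WRITE) among its category bits and __BLK_TA_QUEUE in
 * the low half, which is why queue_trace() masks with 0xffff before
 * comparing against __BLK_TA_QUEUE and tests categories via BLK_TC_ACT().
 */
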
339 static void byteswap_trace(struct blk_io_trace *t)
340 {
341         t->magic = fio_swap32(t->magic);
342         t->sequence = fio_swap32(t->sequence);
343         t->time = fio_swap64(t->time);
344         t->sector = fio_swap64(t->sector);
345         t->bytes = fio_swap32(t->bytes);
346         t->action = fio_swap32(t->action);
347         t->pid = fio_swap32(t->pid);
348         t->device = fio_swap32(t->device);
349         t->cpu = fio_swap32(t->cpu);
350         t->error = fio_swap16(t->error);
351         t->pdu_len = fio_swap16(t->pdu_len);
352 }
353
354 static bool t_is_write(struct blk_io_trace *t)
355 {
356         return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
357 }
358
359 static enum fio_ddir t_get_ddir(struct blk_io_trace *t)
360 {
361         if (t->action & BLK_TC_ACT(BLK_TC_READ))
362                 return DDIR_READ;
363         else if (t->action & BLK_TC_ACT(BLK_TC_WRITE))
364                 return DDIR_WRITE;
365         else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
366                 return DDIR_TRIM;
367
368         return DDIR_INVAL;
369 }
370
371 static void depth_inc(struct blk_io_trace *t, int *depth)
372 {
373         enum fio_ddir ddir;
374
375         ddir = t_get_ddir(t);
376         if (ddir != DDIR_INVAL)
377                 depth[ddir]++;
378 }
379
380 static void depth_dec(struct blk_io_trace *t, int *depth)
381 {
382         enum fio_ddir ddir;
383
384         ddir = t_get_ddir(t);
385         if (ddir != DDIR_INVAL)
386                 depth[ddir]--;
387 }
388
389 static void depth_end(struct blk_io_trace *t, int *this_depth, int *depth)
390 {
391         enum fio_ddir ddir = DDIR_INVAL;
392
393         ddir = t_get_ddir(t);
394         if (ddir != DDIR_INVAL) {
395                 depth[ddir] = max(depth[ddir], this_depth[ddir]);
396                 this_depth[ddir] = 0;
397         }
398 }
399
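/*
 * Worked example (not part of the original source): if a trace shows three
 * reads queued (__BLK_TA_QUEUE) before the first completion,
 * this_depth[DDIR_READ] reaches 3; the completion then records
 * depth[DDIR_READ] = 3 via depth_end() and resets the running counter.
 * The per-direction maxima later seed the probed iodepth at the end of
 * read_blktrace().
 */
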
400 /*
401  * Load a blktrace file by reading all the blk_io_trace entries, and storing
402  * them as io_pieces, the same way the text iolog reader does.
403  */
404 bool init_blktrace_read(struct thread_data *td, const char *filename, int need_swap)
405 {
406         int old_state;
407
408         td->io_log_rfile = fopen(filename, "rb");
409         if (!td->io_log_rfile) {
410                 td_verror(td, errno, "open blktrace file");
411                 goto err;
412         }
413         td->io_log_blktrace_swap = need_swap;
414         td->io_log_last_ttime = 0;
415         td->o.size = 0;
416
417         free_release_files(td);
418
419         old_state = td_bump_runstate(td, TD_SETTING_UP);
420
421         if (!read_blktrace(td)) {
422                 goto err;
423         }
424
425         td_restore_runstate(td, old_state);
426
427         if (!td->files_index) {
428                 log_err("fio: did not find replay device(s)\n");
429                 return false;
430         }
431
432         return true;
433
434 err:
435         if (td->io_log_rfile) {
436                 fclose(td->io_log_rfile);
437                 td->io_log_rfile = NULL;
438         }
439         return false;
440 }
441
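/*
 * Usage sketch (not part of the original source, hedged): fio's iolog setup
 * drives the two entry points above roughly as follows; the real call site
 * lives outside this file and may differ in detail.
 */
#if 0
        int need_swap = 0;

        if (is_blktrace(td->o.read_iolog_file, &need_swap) &&
            !init_blktrace_read(td, td->o.read_iolog_file, need_swap))
                return 1;       /* hypothetical error handling */
#endif
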
442 bool read_blktrace(struct thread_data* td)
443 {
444         struct blk_io_trace t;
445         struct file_cache cache = {
446                 .maj = ~0U,
447                 .min = ~0U,
448         };
449         unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { };
450         unsigned long long rw_bs[DDIR_RWDIR_CNT] = { };
451         unsigned long skipped_writes;
452         FILE *f = td->io_log_rfile;
453         int i, max_depth;
454         struct fio_file *fiof;
455         int this_depth[DDIR_RWDIR_CNT] = { };
456         int depth[DDIR_RWDIR_CNT] = { };
457         int64_t items_to_fetch = 0;
458
459         if (td->o.read_iolog_chunked) {
460                 items_to_fetch = iolog_items_to_fetch(td);
461                 if (!items_to_fetch)
462                         return true;
463         }
464
465         skipped_writes = 0;
466         do {
467                 int ret = fread(&t, 1, sizeof(t), f);
468
469                 if (ferror(f)) {
470                         td_verror(td, errno, "read blktrace file");
471                         goto err;
472                 } else if (feof(f)) {
473                         break;
474                 } else if (ret < (int) sizeof(t)) {
475                         log_err("fio: iolog short read\n");
476                         break;
477                 }
478
479                 if (td->io_log_blktrace_swap)
480                         byteswap_trace(&t);
481
482                 if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
483                         log_err("fio: bad magic in blktrace data: %x\n",
484                                                                 t.magic);
485                         goto err;
486                 }
487                 if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
488                         log_err("fio: bad blktrace version %d\n",
489                                                                 t.magic & 0xff);
490                         goto err;
491                 }
492                 ret = discard_pdu(f, &t);
493                 if (ret < 0) {
494                         td_verror(td, -ret, "blktrace lseek");
495                         goto err;
496                 }
497                 if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
498                         if ((t.action & 0xffff) == __BLK_TA_QUEUE)
499                                 depth_inc(&t, this_depth);
500                         else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
501                                 ((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
502                                 depth_dec(&t, this_depth);
503                         else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
504                                 depth_end(&t, this_depth, depth);
505
506                         if (t_is_write(&t) && read_only) {
507                                 skipped_writes++;
508                                 continue;
509                         }
510                 }
511
512                 if (!queue_trace(td, &t, ios, rw_bs, &cache))
513                         continue;
514
515                 if (td->o.read_iolog_chunked) {
516                         td->io_log_current++;
517                         items_to_fetch--;
518                         if (items_to_fetch == 0)
519                                 break;
520                 }
521         } while (1);
522
523         if (td->o.read_iolog_chunked) {
524                 td->io_log_highmark = td->io_log_current;
525                 td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
526                 fio_gettime(&td->io_log_highmark_time, NULL);
527         }
528
529         if (skipped_writes)
530                 log_err("fio: %s skips replay of %lu writes due to read-only\n",
531                                                 td->o.name, skipped_writes);
532
533         if (td->o.read_iolog_chunked) {
534                 if (td->io_log_current == 0) {
535                         return false;
536                 }
537                 td->o.td_ddir = TD_DDIR_RW;
538                 if ((rw_bs[DDIR_READ] > td->o.max_bs[DDIR_READ] ||
539                      rw_bs[DDIR_WRITE] > td->o.max_bs[DDIR_WRITE] ||
540                      rw_bs[DDIR_TRIM] > td->o.max_bs[DDIR_TRIM]) &&
541                     td->orig_buffer)
542                 {
543                         td->o.max_bs[DDIR_READ] = max(td->o.max_bs[DDIR_READ], rw_bs[DDIR_READ]);
544                         td->o.max_bs[DDIR_WRITE] = max(td->o.max_bs[DDIR_WRITE], rw_bs[DDIR_WRITE]);
545                         td->o.max_bs[DDIR_TRIM] = max(td->o.max_bs[DDIR_TRIM], rw_bs[DDIR_TRIM]);
546                         io_u_quiesce(td);
547                         free_io_mem(td);
548                         if (init_io_u_buffers(td))
549                                 return false;
550                 }
551                 return true;
552         }
553
554         for_each_file(td, fiof, i)
555                 trace_add_open_close_event(td, fiof->fileno, FIO_LOG_CLOSE_FILE);
556
557         fclose(td->io_log_rfile);
558         td->io_log_rfile = NULL;
559
560         /*
561          * For stacked devices, we don't always get a COMPLETE event so
562          * the depth grows to insane values. Limit it to something sane(r).
563          */
564         max_depth = 0;
565         for (i = 0; i < DDIR_RWDIR_CNT; i++) {
566                 if (depth[i] > 1024)
567                         depth[i] = 1024;
568                 else if (!depth[i] && ios[i])
569                         depth[i] = 1;
570                 max_depth = max(depth[i], max_depth);
571         }
572
573         if (!ios[DDIR_READ] && !ios[DDIR_WRITE] && !ios[DDIR_TRIM] &&
574             !ios[DDIR_SYNC]) {
575                 log_err("fio: found no ios in blktrace data\n");
576                 return false;
577         }
578
579         td->o.td_ddir = 0;
580         if (ios[DDIR_READ]) {
581                 td->o.td_ddir |= TD_DDIR_READ;
582                 td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
583         }
584         if (ios[DDIR_WRITE]) {
585                 td->o.td_ddir |= TD_DDIR_WRITE;
586                 td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
587         }
588         if (ios[DDIR_TRIM]) {
589                 td->o.td_ddir |= TD_DDIR_TRIM;
590                 td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
591         }
592
593         /*
594          * If depth wasn't manually set, use probed depth
595          */
596         if (!fio_option_is_set(&td->o, iodepth))
597                 td->o.iodepth = td->o.iodepth_low = max_depth;
598
599         return true;
600 err:
601         fclose(f);
            td->io_log_rfile = NULL;
602         return false;
603 }
604
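/*
 * Worked example (not part of the original source): with read_iolog_chunked
 * set, read_blktrace() refills in batches sized by iolog_items_to_fetch().
 * If a refill leaves io_log_current at 1000 entries, io_log_highmark becomes
 * 1000 and io_log_checkmark becomes (1000 + 1) / 2 = 500, which serves as
 * the low-water mark for scheduling the next refill.
 */
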
605 static int init_merge_param_list(fio_fp64_t *vals, struct blktrace_cursor *bcs,
606                                  int nr_logs, int def, size_t off)
607 {
608         int i = 0, len = 0;
609
610         while (len < FIO_IO_U_LIST_MAX_LEN && vals[len].u.f != 0.0)
611                 len++;
612
613         if (len && len != nr_logs)
614                 return len;
615
616         for (i = 0; i < nr_logs; i++) {
617                 int *val = (int *)((char *)&bcs[i] + off);
618                 *val = def;
619                 if (len)
620                         *val = (int)vals[i].u.f;
621         }
622
623         return 0;
624
625 }
626
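/*
 * Worked example (not part of the original source): merging two traces with
 * merge_blktrace_scalars=50:100 fills bcs[0].scalar = 50 and
 * bcs[1].scalar = 100; with the option unset, every cursor gets the default
 * passed in (100, i.e. unscaled).  If the list length does not match the
 * number of logs, init_merge_param_list() returns that length and the caller
 * reports the mismatch.
 */
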
627 static int find_earliest_io(struct blktrace_cursor *bcs, int nr_logs)
628 {
629         __u64 time = ~(__u64)0;
630         int idx = 0, i;
631
632         for (i = 0; i < nr_logs; i++) {
633                 if (bcs[i].t.time < time) {
634                         time = bcs[i].t.time;
635                         idx = i;
636                 }
637         }
638
639         return idx;
640 }
641
642 static void merge_finish_file(struct blktrace_cursor *bcs, int i, int *nr_logs)
643 {
644         bcs[i].iter++;
645         if (bcs[i].iter < bcs[i].nr_iter) {
646                 fseek(bcs[i].f, 0, SEEK_SET);
647                 return;
648         }
649
650         *nr_logs -= 1;
651
652         /* close file */
653         fclose(bcs[i].f);
654
655         /* keep active files contiguous */
656         memmove(&bcs[i], &bcs[*nr_logs], sizeof(bcs[i]));
657 }
658
659 static int read_trace(struct thread_data *td, struct blktrace_cursor *bc)
660 {
661         int ret = 0;
662         struct blk_io_trace *t = &bc->t;
663
664 read_skip:
665         /* read an io trace */
666         ret = fread(t, 1, sizeof(*t), bc->f);
667         if (ferror(bc->f)) {
668                 td_verror(td, errno, "read blktrace file");
669                 return ret;
670         } else if (feof(bc->f)) {
671                 if (!bc->length)
672                         bc->length = bc->t.time;
673                 return ret;
674         } else if (ret < (int) sizeof(*t)) {
675                 log_err("fio: iolog short read\n");
676                 return -1;
677         }
678
679         if (bc->swap)
680                 byteswap_trace(t);
681
682         /* skip over actions that fio does not care about */
683         if ((t->action & 0xffff) != __BLK_TA_QUEUE ||
684             t_get_ddir(t) == DDIR_INVAL) {
685                 ret = discard_pdu(bc->f, t);
686                 if (ret < 0) {
687                         td_verror(td, -ret, "blktrace lseek");
688                         return ret;
689                 }
690                 goto read_skip;
691         }
692
693         t->time = (t->time + bc->iter * bc->length) * bc->scalar / 100;
694
695         return ret;
696 }
697
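/*
 * Worked example (not part of the original source): read_trace() remaps each
 * timestamp as (time + iter * length) * scalar / 100.  With scalar=50 the
 * trace is replayed on a timeline compressed to half its original duration,
 * and on a second iteration (iter=1) timestamps are offset by the length of
 * the first pass so repeated runs of the same log do not overlap in time.
 */
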
698 static int write_trace(FILE *fp, struct blk_io_trace *t)
699 {
700         /* the pdu is not used, so write out only the io trace itself */
701         t->pdu_len = 0;
702         return fwrite((void *)t, sizeof(*t), 1, fp);
703 }
704
705 int merge_blktrace_iologs(struct thread_data *td)
706 {
707         int nr_logs = get_max_str_idx(td->o.read_iolog_file);
708         struct blktrace_cursor *bcs = malloc(sizeof(struct blktrace_cursor) *
709                                              nr_logs);
710         struct blktrace_cursor *bc;
711         FILE *merge_fp;
712         char *str, *ptr, *name, *merge_buf;
713         int i, ret;
714
715         ret = init_merge_param_list(td->o.merge_blktrace_scalars, bcs, nr_logs,
716                                     100, offsetof(struct blktrace_cursor,
717                                                   scalar));
718         if (ret) {
719                 log_err("fio: merge_blktrace_scalars(%d) != nr_logs(%d)\n",
720                         ret, nr_logs);
721                 goto err_param;
722         }
723
724         ret = init_merge_param_list(td->o.merge_blktrace_iters, bcs, nr_logs,
725                                     1, offsetof(struct blktrace_cursor,
726                                                 nr_iter));
727         if (ret) {
728                 log_err("fio: merge_blktrace_iters(%d) != nr_logs(%d)\n",
729                         ret, nr_logs);
730                 goto err_param;
731         }
732
733         /* setup output file */
734         merge_fp = fopen(td->o.merge_blktrace_file, "w");
            if (!merge_fp) {
                    ret = -errno;
                    log_err("fio: could not open merge output file: %s\n",
                            td->o.merge_blktrace_file);
                    goto err_param;
            }
735         merge_buf = malloc(128 * 1024);
736         if (!merge_buf)
737                 goto err_out_file;
738         ret = setvbuf(merge_fp, merge_buf, _IOFBF, 128 * 1024);
739         if (ret)
740                 goto err_merge_buf;
741
742         /* setup input files */
743         str = ptr = strdup(td->o.read_iolog_file);
744         nr_logs = 0;
745         for (i = 0; (name = get_next_str(&ptr)) != NULL; i++) {
746                 bcs[i].f = fopen(name, "rb");
747                 if (!bcs[i].f) {
748                         log_err("fio: could not open file: %s\n", name);
749                         ret = -errno;
750                         free(str);
751                         goto err_file;
752                 }
753                 nr_logs++;
754
755                 if (!is_blktrace(name, &bcs[i].swap)) {
756                         log_err("fio: file is not a blktrace: %s\n", name);
757                         free(str);
758                         goto err_file;
759                 }
760
761                 ret = read_trace(td, &bcs[i]);
762                 if (ret < 0) {
763                         free(str);
764                         goto err_file;
765                 } else if (!ret) {
766                         merge_finish_file(bcs, i, &nr_logs);
767                         i--;
768                 }
769         }
770         free(str);
771
772         /* merge files */
773         while (nr_logs) {
774                 i = find_earliest_io(bcs, nr_logs);
775                 bc = &bcs[i];
776                 /* skip over the pdu */
777                 ret = discard_pdu(bc->f, &bc->t);
778                 if (ret < 0) {
779                         td_verror(td, -ret, "blktrace lseek");
780                         goto err_file;
781                 }
782
783                 ret = write_trace(merge_fp, &bc->t);
784                 ret = read_trace(td, bc);
785                 if (ret < 0)
786                         goto err_file;
787                 else if (!ret)
788                         merge_finish_file(bcs, i, &nr_logs);
789         }
790
791         /* set iolog file to read from the newly merged file */
792         td->o.read_iolog_file = td->o.merge_blktrace_file;
793         ret = 0;
794
795 err_file:
796         /* cleanup */
797         for (i = 0; i < nr_logs; i++) {
798                 fclose(bcs[i].f);
799         }
800 err_merge_buf:
801         free(merge_buf);
802 err_out_file:
803         fflush(merge_fp);
804         fclose(merge_fp);
805 err_param:
806         free(bcs);
807
808         return ret;
809 }
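
/*
 * Usage sketch (not part of the original source, hedged): merge_blktrace_iologs()
 * is driven by job options along these lines; the option names are real fio
 * options, the file names are made up.
 *
 *   read_iolog=/tmp/sda.trace:/tmp/sdb.trace
 *   merge_blktrace_file=/tmp/merged.trace
 *   merge_blktrace_scalars=50:100
 *   merge_blktrace_iters=2:1
 *
 * The merged, time-ordered trace is then replayed as if it had been a single
 * blktrace capture.
 */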