/*
 * blktrace support code for fio
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

#include "flist.h"
#include "fio.h"
#include "iolog.h"
#include "blktrace.h"
#include "blktrace_api.h"
#include "oslib/linux-dev-lookup.h"

struct file_cache {
	unsigned int maj;
	unsigned int min;
	unsigned int fileno;
};

/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(FILE* f, struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	if (fseek(f, t->pdu_len, SEEK_CUR) < 0)
		return -errno;

	return t->pdu_len;
}

/*
 * Check if this is a blktrace binary data file. We read a single trace
 * into memory and check for the magic signature.
 */
bool is_blktrace(const char *filename, int *need_swap)
{
	struct blk_io_trace t;
	int fd, ret;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		return false;

	ret = read(fd, &t, sizeof(t));
	close(fd);

	if (ret < 0) {
		perror("read blktrace");
		return false;
	} else if (ret != sizeof(t)) {
		log_err("fio: short read on blktrace file\n");
		return false;
	}

	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 0;
		return true;
	}

	/*
	 * Maybe it needs to be endian swapped...
	 */
	t.magic = fio_swap32(t.magic);
	if ((t.magic & 0xffffff00) == BLK_IO_TRACE_MAGIC) {
		*need_swap = 1;
		return true;
	}

	return false;
}

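/*
 * blk_io_trace->device packs the device numbers the same way the kernel
 * does: the low 20 bits hold the minor number and the remaining high bits
 * hold the major number.
 */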
#define FMINORBITS	20
#define FMINORMASK	((1U << FMINORBITS) - 1)
#define FMAJOR(dev)	((unsigned int) ((dev) >> FMINORBITS))
#define FMINOR(dev)	((unsigned int) ((dev) & FMINORMASK))

static void trace_add_open_close_event(struct thread_data *td, int fileno, enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}

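/*
 * Map the device number from a trace to a fio file, adding the device to
 * the job's file list (and logging an open event) the first time it is
 * seen. The most recent lookup is cached to avoid rescanning the file list
 * for every trace.
 */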
static int trace_add_file(struct thread_data *td, __u32 device,
			  struct file_cache *cache)
{
	unsigned int maj = FMAJOR(device);
	unsigned int min = FMINOR(device);
	struct fio_file *f;
	char dev[256];
	unsigned int i;

	if (cache->maj == maj && cache->min == min)
		return cache->fileno;

	cache->maj = maj;
	cache->min = min;

	/*
	 * check for this file in our list
	 */
	for_each_file(td, f, i)
		if (f->major == maj && f->minor == min) {
			cache->fileno = f->fileno;
			return cache->fileno;
		}

	strcpy(dev, "/dev");
	if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
		int fileno;

		if (td->o.replay_redirect)
			dprint(FD_BLKTRACE, "device lookup: %d/%d overridden"
					" with: %s\n", maj, min,
					td->o.replay_redirect);
		else
			dprint(FD_BLKTRACE, "device lookup: %d/%d\n", maj, min);

		dprint(FD_BLKTRACE, "add device %s\n", dev);
		fileno = add_file_exclusive(td, dev);
		td->o.open_files++;
		td->files[fileno]->major = maj;
		td->files[fileno]->minor = min;
		trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
		cache->fileno = fileno;
	}

	return cache->fileno;
}

static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
{
	if (!o->replay_align)
		return;

	t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
}

/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->offset = offset * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
							ipo->ddir, ipo->offset,
							ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}

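/* notify traces carry no I/O of their own; just log them for debugging */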
static bool handle_trace_notify(struct blk_io_trace *t)
{
	switch (t->action) {
	case BLK_TN_PROCESS:
		dprint(FD_BLKTRACE, "got process notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_TIMESTAMP:
		dprint(FD_BLKTRACE, "got timestamp notify: %x, %d\n",
				t->action, t->pid);
		break;
	case BLK_TN_MESSAGE:
		break;
	default:
		dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
		break;
	}
	return false;
}

static bool handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned long long *bs,
				 struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_TRIM))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);
	fileno = trace_add_file(td, t->device, cache);

	ios[DDIR_TRIM]++;
	if (t->bytes > bs[DDIR_TRIM])
		bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	INIT_FLIST_HEAD(&ipo->list);

	ipo->offset = t->sector * 512;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(td->o.replay_align, ipo);
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
							ipo->offset, ipo->len,
							ipo->delay);
	queue_io_piece(td, ipo);
	return true;
}

static void dump_trace(struct blk_io_trace *t)
{
	log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action);
}

static bool handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
			    unsigned long long ttime, unsigned long *ios,
			    unsigned long long *bs, struct file_cache *cache)
{
	int rw;
	int fileno;

	fileno = trace_add_file(td, t->device, cache);

	rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;

	if (rw) {
		if (td->o.replay_skip & (1u << DDIR_WRITE))
			return false;
	} else {
		if (td->o.replay_skip & (1u << DDIR_READ))
			return false;
	}

	if (!t->bytes) {
		if (!fio_did_warn(FIO_WARN_BTRACE_ZERO))
			dump_trace(t);
		return false;
	}

	if (t->bytes > bs[rw])
		bs[rw] = t->bytes;

	ios[rw]++;
	td->o.size += t->bytes;
	store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
	return true;
}

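/*
 * A flush is replayed as a DDIR_SYNC io_piece; TD_F_SYNCS marks the job as
 * one that will queue sync operations during replay.
 */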
static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
			       unsigned long long ttime, unsigned long *ios,
			       struct file_cache *cache)
{
	struct io_piece *ipo;
	int fileno;

	if (td->o.replay_skip & (1u << DDIR_SYNC))
		return false;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);
	fileno = trace_add_file(td, t->device, cache);

	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_SYNC;
	ipo->fileno = fileno;

	ios[DDIR_SYNC]++;
	dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay);

	if (!(td->flags & TD_F_SYNCS))
		td->flags |= TD_F_SYNCS;

	queue_io_piece(td, ipo);
	return true;
}

/*
 * We only care about queue traces; most of the others are side effects
 * of the internal workings of the block layer.
 */
static bool queue_trace(struct thread_data *td, struct blk_io_trace *t,
			 unsigned long *ios, unsigned long long *bs,
			 struct file_cache *cache)
{
	unsigned long long *last_ttime = &td->io_log_blktrace_last_ttime;
	unsigned long long delay = 0;

	if ((t->action & 0xffff) != __BLK_TA_QUEUE)
		return false;

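	/*
	 * Convert the inter-arrival time into a replay delay.
	 * replay_time_scale is a percentage: 100 keeps the original timing,
	 * larger values shrink the delay (faster replay), smaller values
	 * stretch it.
	 */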
	if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
		if (!*last_ttime || td->o.no_stall || t->time < *last_ttime)
			delay = 0;
		else if (td->o.replay_time_scale == 100)
			delay = t->time - *last_ttime;
		else {
			double tmp = t->time - *last_ttime;
			double scale;

			scale = (double) 100.0 / (double) td->o.replay_time_scale;
			tmp *= scale;
			delay = tmp;
		}
		*last_ttime = t->time;
	}

	t_bytes_align(&td->o, t);

	if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
		return handle_trace_notify(t);
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return handle_trace_discard(td, t, delay, ios, bs, cache);
	else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH))
		return handle_trace_flush(td, t, delay, ios, cache);
	else
		return handle_trace_fs(td, t, delay, ios, bs, cache);
}

static void byteswap_trace(struct blk_io_trace *t)
{
	t->magic = fio_swap32(t->magic);
	t->sequence = fio_swap32(t->sequence);
	t->time = fio_swap64(t->time);
	t->sector = fio_swap64(t->sector);
	t->bytes = fio_swap32(t->bytes);
	t->action = fio_swap32(t->action);
	t->pid = fio_swap32(t->pid);
	t->device = fio_swap32(t->device);
	t->cpu = fio_swap32(t->cpu);
	t->error = fio_swap16(t->error);
	t->pdu_len = fio_swap16(t->pdu_len);
}

static bool t_is_write(struct blk_io_trace *t)
{
	return (t->action & BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_DISCARD)) != 0;
}

static enum fio_ddir t_get_ddir(struct blk_io_trace *t)
{
	if (t->action & BLK_TC_ACT(BLK_TC_READ))
		return DDIR_READ;
	else if (t->action & BLK_TC_ACT(BLK_TC_WRITE))
		return DDIR_WRITE;
	else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
		return DDIR_TRIM;

	return DDIR_INVAL;
}

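/*
 * Rough per-ddir queue depth estimation while scanning the trace: QUEUE
 * events raise the depth, merges lower it, and COMPLETE records the
 * high-water mark reached since the last completion.
 */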
static void depth_inc(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]++;
}

static void depth_dec(struct blk_io_trace *t, int *depth)
{
	enum fio_ddir ddir;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL)
		depth[ddir]--;
}

static void depth_end(struct blk_io_trace *t, int *this_depth, int *depth)
{
	enum fio_ddir ddir = DDIR_INVAL;

	ddir = t_get_ddir(t);
	if (ddir != DDIR_INVAL) {
		depth[ddir] = max(depth[ddir], this_depth[ddir]);
		this_depth[ddir] = 0;
	}
}

/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
bool init_blktrace_read(struct thread_data *td, const char *filename, int need_swap)
{
	int old_state;

	td->io_log_rfile = fopen(filename, "rb");
	if (!td->io_log_rfile) {
		td_verror(td, errno, "open blktrace file");
		goto err;
	}
	td->io_log_blktrace_swap = need_swap;
	td->io_log_blktrace_last_ttime = 0;
	td->o.size = 0;

	free_release_files(td);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	if (!read_blktrace(td)) {
		goto err;
	}

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return false;
	}

	return true;

err:
	if (td->io_log_rfile) {
		fclose(td->io_log_rfile);
		td->io_log_rfile = NULL;
	}
	return false;
}

bool read_blktrace(struct thread_data* td)
{
	struct blk_io_trace t;
	struct file_cache cache = { };
	unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { };
	unsigned long long rw_bs[DDIR_RWDIR_CNT] = { };
	unsigned long skipped_writes;
	FILE *f = td->io_log_rfile;
	int i, max_depth;
	struct fio_file *fiof;
	int this_depth[DDIR_RWDIR_CNT] = { };
	int depth[DDIR_RWDIR_CNT] = { };
	int64_t items_to_fetch = 0;

	if (td->o.read_iolog_chunked) {
		items_to_fetch = iolog_items_to_fetch(td);
		if (!items_to_fetch)
			return true;
	}

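	/*
	 * Walk the trace one blk_io_trace record at a time, seeking past
	 * each record's pdu, and turn QUEUE events into io_pieces.
	 */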
	skipped_writes = 0;
	do {
		int ret = fread(&t, 1, sizeof(t), f);

		if (ferror(f)) {
			td_verror(td, errno, "read blktrace file");
			goto err;
		} else if (feof(f)) {
			break;
		} else if (ret < (int) sizeof(t)) {
			log_err("fio: iolog short read\n");
			break;
		}

		if (td->io_log_blktrace_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(f, &t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		if (!queue_trace(td, &t, ios, rw_bs, &cache))
			continue;

		if (td->o.read_iolog_chunked) {
			td->io_log_current++;
			items_to_fetch--;
			if (items_to_fetch == 0)
				break;
		}
	} while (1);

	if (td->o.read_iolog_chunked) {
		td->io_log_highmark = td->io_log_current;
		td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
		fio_gettime(&td->io_log_highmark_time, NULL);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (td->o.read_iolog_chunked) {
		if (td->io_log_current == 0) {
			return false;
		}
		td->o.td_ddir = TD_DDIR_RW;
		if ((rw_bs[DDIR_READ] > td->o.max_bs[DDIR_READ] ||
		     rw_bs[DDIR_WRITE] > td->o.max_bs[DDIR_WRITE] ||
		     rw_bs[DDIR_TRIM] > td->o.max_bs[DDIR_TRIM]) &&
		    td->orig_buffer)
		{
			td->o.max_bs[DDIR_READ] = max(td->o.max_bs[DDIR_READ], rw_bs[DDIR_READ]);
			td->o.max_bs[DDIR_WRITE] = max(td->o.max_bs[DDIR_WRITE], rw_bs[DDIR_WRITE]);
			td->o.max_bs[DDIR_TRIM] = max(td->o.max_bs[DDIR_TRIM], rw_bs[DDIR_TRIM]);
			io_u_quiesce(td);
			free_io_mem(td);
			init_io_u_buffers(td);
		}
		return true;
	}

	for_each_file(td, fiof, i)
		trace_add_open_close_event(td, fiof->fileno, FIO_LOG_CLOSE_FILE);

	fclose(td->io_log_rfile);
	td->io_log_rfile = NULL;

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	max_depth = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE] && !ios[DDIR_TRIM] &&
	    !ios[DDIR_SYNC]) {
		log_err("fio: found no ios in blktrace data\n");
		return false;
	}

	td->o.td_ddir = 0;
	if (ios[DDIR_READ]) {
		td->o.td_ddir |= TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	}
	if (ios[DDIR_WRITE]) {
		td->o.td_ddir |= TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	}
	if (ios[DDIR_TRIM]) {
		td->o.td_ddir |= TD_DDIR_TRIM;
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return true;
err:
	fclose(f);
	return false;
}

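/*
 * Apply a per-trace merge parameter (scalar or iteration count, selected by
 * 'off') to every cursor, falling back to 'def' when the user supplied no
 * values. Returns 0 on success, or the number of user-supplied values when
 * it does not match the number of trace files.
 */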
static int init_merge_param_list(fio_fp64_t *vals, struct blktrace_cursor *bcs,
				 int nr_logs, int def, size_t off)
{
	int i = 0, len = 0;

	while (len < FIO_IO_U_LIST_MAX_LEN && vals[len].u.f != 0.0)
		len++;

	if (len && len != nr_logs)
		return len;

	for (i = 0; i < nr_logs; i++) {
		int *val = (int *)((char *)&bcs[i] + off);
		*val = def;
		if (len)
			*val = (int)vals[i].u.f;
	}

	return 0;
}

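/* return the index of the cursor holding the oldest pending trace */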
static int find_earliest_io(struct blktrace_cursor *bcs, int nr_logs)
{
	__u64 time = ~(__u64)0;
	int idx = 0, i;

	for (i = 0; i < nr_logs; i++) {
		if (bcs[i].t.time < time) {
			time = bcs[i].t.time;
			idx = i;
		}
	}

	return idx;
}

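/*
 * The current trace file is exhausted: rewind it if more iterations were
 * requested, otherwise close it and keep the cursor array contiguous.
 */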
static void merge_finish_file(struct blktrace_cursor *bcs, int i, int *nr_logs)
{
	bcs[i].iter++;
	if (bcs[i].iter < bcs[i].nr_iter) {
		fseek(bcs[i].f, 0, SEEK_SET);
		return;
	}

	*nr_logs -= 1;

	/* close file */
	fclose(bcs[i].f);

	/* keep active files contiguous */
	memmove(&bcs[i], &bcs[*nr_logs], sizeof(bcs[i]));
}

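/*
 * Read the next replayable trace into the cursor, skipping anything that
 * is not a QUEUE event with a valid data direction. The timestamp is
 * offset by the iteration number and scaled by the per-trace scalar so
 * repeated and rescaled traces interleave correctly when merged.
 */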
static int read_trace(struct thread_data *td, struct blktrace_cursor *bc)
{
	int ret = 0;
	struct blk_io_trace *t = &bc->t;

read_skip:
	/* read an io trace */
	ret = fread(t, 1, sizeof(*t), bc->f);
	if (ferror(bc->f)) {
		td_verror(td, errno, "read blktrace file");
		return ret;
	} else if (feof(bc->f)) {
		if (!bc->length)
			bc->length = bc->t.time;
		return ret;
	} else if (ret < (int) sizeof(*t)) {
		log_err("fio: iolog short read\n");
		return -1;
	}

	if (bc->swap)
		byteswap_trace(t);

	/* skip over actions that fio does not care about */
	if ((t->action & 0xffff) != __BLK_TA_QUEUE ||
	    t_get_ddir(t) == DDIR_INVAL) {
		ret = discard_pdu(bc->f, t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			return ret;
		}
		goto read_skip;
	}

	t->time = (t->time + bc->iter * bc->length) * bc->scalar / 100;

	return ret;
}

static int write_trace(FILE *fp, struct blk_io_trace *t)
{
	/* pdu is not used so just write out only the io trace */
	t->pdu_len = 0;
	return fwrite((void *)t, sizeof(*t), 1, fp);
}

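/*
 * Merge multiple blktrace files into a single time-ordered trace, then
 * point read_iolog_file at the merged result for replay.
 */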
int merge_blktrace_iologs(struct thread_data *td)
{
	int nr_logs = get_max_str_idx(td->o.read_iolog_file);
	struct blktrace_cursor *bcs = malloc(sizeof(struct blktrace_cursor) *
					     nr_logs);
	struct blktrace_cursor *bc;
	FILE *merge_fp;
	char *str, *ptr, *name, *merge_buf;
	int i, ret;

	ret = init_merge_param_list(td->o.merge_blktrace_scalars, bcs, nr_logs,
				    100, offsetof(struct blktrace_cursor,
						  scalar));
	if (ret) {
		log_err("fio: merge_blktrace_scalars(%d) != nr_logs(%d)\n",
			ret, nr_logs);
		goto err_param;
	}

	ret = init_merge_param_list(td->o.merge_blktrace_iters, bcs, nr_logs,
				    1, offsetof(struct blktrace_cursor,
						nr_iter));
	if (ret) {
		log_err("fio: merge_blktrace_iters(%d) != nr_logs(%d)\n",
			ret, nr_logs);
		goto err_param;
	}

	/* setup output file */
	merge_fp = fopen(td->o.merge_blktrace_file, "w");
	if (!merge_fp) {
		ret = -errno;
		log_err("fio: could not open merge output file: %s\n",
			td->o.merge_blktrace_file);
		goto err_param;
	}
	merge_buf = malloc(128 * 1024);
	if (!merge_buf)
		goto err_out_file;
	ret = setvbuf(merge_fp, merge_buf, _IOFBF, 128 * 1024);
	if (ret)
		goto err_merge_buf;

	/* setup input files */
	str = ptr = strdup(td->o.read_iolog_file);
	nr_logs = 0;
	for (i = 0; (name = get_next_str(&ptr)) != NULL; i++) {
		bcs[i].f = fopen(name, "rb");
		if (!bcs[i].f) {
			log_err("fio: could not open file: %s\n", name);
			ret = -errno;
			free(str);
			goto err_file;
		}
		nr_logs++;

		if (!is_blktrace(name, &bcs[i].swap)) {
			log_err("fio: file is not a blktrace: %s\n", name);
			free(str);
			goto err_file;
		}

		ret = read_trace(td, &bcs[i]);
		if (ret < 0) {
			free(str);
			goto err_file;
		} else if (!ret) {
			merge_finish_file(bcs, i, &nr_logs);
			i--;
		}
	}
	free(str);

	/* merge files */
	while (nr_logs) {
		i = find_earliest_io(bcs, nr_logs);
		bc = &bcs[i];
		/* skip over the pdu */
		ret = discard_pdu(bc->f, &bc->t);
		if (ret < 0) {
			td_verror(td, -ret, "blktrace lseek");
			goto err_file;
		}

		ret = write_trace(merge_fp, &bc->t);
		ret = read_trace(td, bc);
		if (ret < 0)
			goto err_file;
		else if (!ret)
			merge_finish_file(bcs, i, &nr_logs);
	}

	/* set iolog file to read from the newly merged file */
	td->o.read_iolog_file = td->o.merge_blktrace_file;
	ret = 0;

err_file:
	/* cleanup */
	for (i = 0; i < nr_logs; i++) {
		fclose(bcs[i].f);
	}
err_merge_buf:
	free(merge_buf);
err_out_file:
	fflush(merge_fp);
	fclose(merge_fp);
err_param:
	free(bcs);

	return ret;
}