*/
#include <stdio.h>
#include <stdlib.h>
-#include <sys/ioctl.h>
-#include <linux/fs.h>
+#include <unistd.h>
+#include <errno.h>
#include "flist.h"
#include "fio.h"
+#include "iolog.h"
#include "blktrace.h"
#include "blktrace_api.h"
#include "oslib/linux-dev-lookup.h"
-#define TRACE_FIFO_SIZE 8192
-
-/*
- * fifo refill frontend, to avoid reading data in trace sized bites
- */
-static int refill_fifo(struct thread_data *td, struct fifo *fifo, int fd)
-{
- char buf[TRACE_FIFO_SIZE];
- unsigned int total;
- int ret;
-
- total = sizeof(buf);
- if (total > fifo_room(fifo))
- total = fifo_room(fifo);
-
- ret = read(fd, buf, total);
- if (ret < 0) {
- td_verror(td, errno, "read blktrace file");
- return -1;
- }
-
- if (ret > 0)
- ret = fifo_put(fifo, buf, ret);
-
- dprint(FD_BLKTRACE, "refill: filled %d bytes\n", ret);
- return ret;
-}
-
-/*
- * Retrieve 'len' bytes from the fifo, refilling if necessary.
- */
-static int trace_fifo_get(struct thread_data *td, struct fifo *fifo, int fd,
- void *buf, unsigned int len)
-{
- if (fifo_len(fifo) < len) {
- int ret = refill_fifo(td, fifo, fd);
-
- if (ret < 0)
- return ret;
- }
-
- return fifo_get(fifo, buf, len);
-}
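+/*
+ * Cache of the last looked-up device: lets back-to-back traces against the
+ * same maj/min skip the file-list scan. Held by the caller instead of the
+ * old function-local statics, so state is per replay invocation.
+ */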
+struct file_cache {
+ unsigned int maj;
+ unsigned int min;
+ unsigned int fileno;
+};
/*
* Just discard the pdu by seeking past it.
*/
-static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
- struct blk_io_trace *t)
+static int discard_pdu(FILE *f, struct blk_io_trace *t)
{
if (t->pdu_len == 0)
return 0;
dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
- return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
+ if (fseek(f, t->pdu_len, SEEK_CUR) < 0)
+ return -errno;
+
+ return t->pdu_len;
}
/*
flist_add_tail(&ipo->list, &td->io_log_list);
}
-static int get_dev_blocksize(const char *dev, unsigned int *bs)
-{
- int fd;
-
- fd = open(dev, O_RDONLY);
- if (fd < 0)
- return 1;
-
- if (ioctl(fd, BLKSSZGET, bs) < 0) {
- close(fd);
- return 1;
- }
-
- close(fd);
- return 0;
-}
-
static int trace_add_file(struct thread_data *td, __u32 device,
- unsigned int *bs)
+ struct file_cache *cache)
{
- static unsigned int last_maj, last_min, last_fileno, last_bs;
unsigned int maj = FMAJOR(device);
unsigned int min = FMINOR(device);
struct fio_file *f;
- unsigned int i;
char dev[256];
+ unsigned int i;
- if (last_maj == maj && last_min == min) {
- *bs = last_bs;
- return last_fileno;
- }
+ if (cache->maj == maj && cache->min == min)
+ return cache->fileno;
- last_maj = maj;
- last_min = min;
+ cache->maj = maj;
+ cache->min = min;
/*
* check for this file in our list
*/
- for_each_file(td, f, i) {
+ for_each_file(td, f, i)
if (f->major == maj && f->minor == min) {
- last_fileno = f->fileno;
- last_bs = f->bs;
- goto out;
+ cache->fileno = f->fileno;
+ return cache->fileno;
}
- }
strcpy(dev, "/dev");
if (blktrace_lookup_device(td->o.replay_redirect, dev, maj, min)) {
- unsigned int this_bs;
int fileno;
if (td->o.replay_redirect)
dprint(FD_BLKTRACE, "add devices %s\n", dev);
fileno = add_file_exclusive(td, dev);
-
- if (get_dev_blocksize(dev, &this_bs))
- this_bs = 512;
-
td->o.open_files++;
td->files[fileno]->major = maj;
td->files[fileno]->minor = min;
- td->files[fileno]->bs = this_bs;
trace_add_open_close_event(td, fileno, FIO_LOG_OPEN_FILE);
-
- last_fileno = fileno;
- last_bs = this_bs;
+ cache->fileno = fileno;
}
-out:
- *bs = last_bs;
- return last_fileno;
+ return cache->fileno;
}
static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
*/
static void store_ipo(struct thread_data *td, unsigned long long offset,
unsigned int bytes, int rw, unsigned long long ttime,
- int fileno, unsigned int bs)
+ int fileno)
{
struct io_piece *ipo;
ipo = calloc(1, sizeof(*ipo));
init_ipo(ipo);
- ipo->offset = offset * bs;
+ ipo->offset = offset * 512;
if (td->o.replay_scale)
ipo->offset = ipo->offset / td->o.replay_scale;
ipo_bytes_align(td->o.replay_align, ipo);
queue_io_piece(td, ipo);
}
-static void handle_trace_notify(struct blk_io_trace *t)
+static bool handle_trace_notify(struct blk_io_trace *t)
{
switch (t->action) {
case BLK_TN_PROCESS:
dprint(FD_BLKTRACE, "unknown trace act %x\n", t->action);
break;
}
+ return false;
}
-static void handle_trace_discard(struct thread_data *td,
+static bool handle_trace_discard(struct thread_data *td,
struct blk_io_trace *t,
unsigned long long ttime,
- unsigned long *ios, unsigned int *rw_bs)
+ unsigned long *ios, unsigned long long *bs,
+ struct file_cache *cache)
{
struct io_piece *ipo;
- unsigned int bs;
int fileno;
if (td->o.replay_skip & (1u << DDIR_TRIM))
- return;
+ return false;
ipo = calloc(1, sizeof(*ipo));
init_ipo(ipo);
- fileno = trace_add_file(td, t->device, &bs);
+ fileno = trace_add_file(td, t->device, cache);
ios[DDIR_TRIM]++;
- if (t->bytes > rw_bs[DDIR_TRIM])
- rw_bs[DDIR_TRIM] = t->bytes;
+ if (t->bytes > bs[DDIR_TRIM])
+ bs[DDIR_TRIM] = t->bytes;
td->o.size += t->bytes;
INIT_FLIST_HEAD(&ipo->list);
- ipo->offset = t->sector * bs;
+ ipo->offset = t->sector * 512;
if (td->o.replay_scale)
ipo->offset = ipo->offset / td->o.replay_scale;
ipo_bytes_align(td->o.replay_align, ipo);
ipo->offset, ipo->len,
ipo->delay);
queue_io_piece(td, ipo);
+ return true;
}
static void dump_trace(struct blk_io_trace *t)
log_err("blktrace: ignoring zero byte trace: action=%x\n", t->action);
}
-static void handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
+static bool handle_trace_fs(struct thread_data *td, struct blk_io_trace *t,
unsigned long long ttime, unsigned long *ios,
- unsigned int *rw_bs)
+ unsigned long long *bs, struct file_cache *cache)
{
- unsigned int bs;
int rw;
int fileno;
- fileno = trace_add_file(td, t->device, &bs);
+ fileno = trace_add_file(td, t->device, cache);
rw = (t->action & BLK_TC_ACT(BLK_TC_WRITE)) != 0;
if (rw) {
if (td->o.replay_skip & (1u << DDIR_WRITE))
- return;
+ return false;
} else {
if (td->o.replay_skip & (1u << DDIR_READ))
- return;
+ return false;
}
if (!t->bytes) {
if (!fio_did_warn(FIO_WARN_BTRACE_ZERO))
dump_trace(t);
- return;
+ return false;
}
- if (t->bytes > rw_bs[rw])
- rw_bs[rw] = t->bytes;
+ if (t->bytes > bs[rw])
+ bs[rw] = t->bytes;
ios[rw]++;
td->o.size += t->bytes;
- store_ipo(td, t->sector, t->bytes, rw, ttime, fileno, bs);
+ store_ipo(td, t->sector, t->bytes, rw, ttime, fileno);
+ return true;
}
-static void handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
- unsigned long long ttime, unsigned long *ios)
+static bool handle_trace_flush(struct thread_data *td, struct blk_io_trace *t,
+ unsigned long long ttime, unsigned long *ios,
+ struct file_cache *cache)
{
struct io_piece *ipo;
- unsigned int bs;
int fileno;
if (td->o.replay_skip & (1u << DDIR_SYNC))
- return;
+ return false;
ipo = calloc(1, sizeof(*ipo));
init_ipo(ipo);
- fileno = trace_add_file(td, t->device, &bs);
+ fileno = trace_add_file(td, t->device, cache);
ipo->delay = ttime / 1000;
ipo->ddir = DDIR_SYNC;
ios[DDIR_SYNC]++;
dprint(FD_BLKTRACE, "store flush delay=%lu\n", ipo->delay);
queue_io_piece(td, ipo);
+ return true;
}
/*
* We only care for queue traces, most of the others are side effects
* due to internal workings of the block layer.
*/
-static void handle_trace(struct thread_data *td, struct blk_io_trace *t,
- unsigned long *ios, unsigned int *bs)
+static bool queue_trace(struct thread_data *td, struct blk_io_trace *t,
+ unsigned long *ios, unsigned long long *bs,
+ struct file_cache *cache)
{
- static unsigned long long last_ttime;
+ unsigned long long *last_ttime = &td->io_log_blktrace_last_ttime;
unsigned long long delay = 0;
if ((t->action & 0xffff) != __BLK_TA_QUEUE)
- return;
+ return false;
if (!(t->action & BLK_TC_ACT(BLK_TC_NOTIFY))) {
- if (!last_ttime || td->o.no_stall)
+ if (!*last_ttime || td->o.no_stall || t->time < *last_ttime)
delay = 0;
else if (td->o.replay_time_scale == 100)
- delay = t->time - last_ttime;
+ delay = t->time - *last_ttime;
else {
- double tmp = t->time - last_ttime;
+ double tmp = t->time - *last_ttime;
double scale;
scale = (double) 100.0 / (double) td->o.replay_time_scale;
tmp *= scale;
delay = tmp;
}
- last_ttime = t->time;
+ *last_ttime = t->time;
}
t_bytes_align(&td->o, t);
if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
- handle_trace_notify(t);
+ return handle_trace_notify(t);
else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
- handle_trace_discard(td, t, delay, ios, bs);
+ return handle_trace_discard(td, t, delay, ios, bs, cache);
else if (t->action & BLK_TC_ACT(BLK_TC_FLUSH))
- handle_trace_flush(td, t, delay, ios);
+ return handle_trace_flush(td, t, delay, ios, cache);
else
- handle_trace_fs(td, t, delay, ios, bs);
+ return handle_trace_fs(td, t, delay, ios, bs, cache);
}
static void byteswap_trace(struct blk_io_trace *t)
* Load a blktrace file by reading all the blk_io_trace entries, and storing
* them as io_pieces like the fio text version would do.
*/
-bool load_blktrace(struct thread_data *td, const char *filename, int need_swap)
+bool init_blktrace_read(struct thread_data *td, const char *filename, int need_swap)
+{
+ int old_state;
+
+ td->io_log_rfile = fopen(filename, "rb");
+ if (!td->io_log_rfile) {
+ td_verror(td, errno, "open blktrace file");
+ goto err;
+ }
+ td->io_log_blktrace_swap = need_swap;
+ td->io_log_blktrace_last_ttime = 0;
+ td->o.size = 0;
+
+ free_release_files(td);
+
+ old_state = td_bump_runstate(td, TD_SETTING_UP);
+
+	if (!read_blktrace(td)) {
+		td_restore_runstate(td, old_state);
+		goto err;
+	}
+
+ td_restore_runstate(td, old_state);
+
+ if (!td->files_index) {
+ log_err("fio: did not find replay device(s)\n");
+ return false;
+ }
+
+ return true;
+
+err:
+ if (td->io_log_rfile) {
+ fclose(td->io_log_rfile);
+ td->io_log_rfile = NULL;
+ }
+ return false;
+}
+
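+/*
+ * Read traces from td->io_log_rfile and queue them as io_pieces. In
+ * read_iolog_chunked mode, only fetch the next batch and leave the file
+ * open for the following call.
+ */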
+bool read_blktrace(struct thread_data *td)
{
struct blk_io_trace t;
+ struct file_cache cache = { };
unsigned long ios[DDIR_RWDIR_SYNC_CNT] = { };
- unsigned int rw_bs[DDIR_RWDIR_CNT] = { };
+ unsigned long long rw_bs[DDIR_RWDIR_CNT] = { };
unsigned long skipped_writes;
- struct fifo *fifo;
- int fd, i, old_state, max_depth;
- struct fio_file *f;
+ FILE *f = td->io_log_rfile;
+ int i, max_depth;
+ struct fio_file *fiof;
int this_depth[DDIR_RWDIR_CNT] = { };
int depth[DDIR_RWDIR_CNT] = { };
+ int64_t items_to_fetch = 0;
- fd = open(filename, O_RDONLY);
- if (fd < 0) {
- td_verror(td, errno, "open blktrace file");
- return false;
+ if (td->o.read_iolog_chunked) {
+ items_to_fetch = iolog_items_to_fetch(td);
+ if (!items_to_fetch)
+ return true;
}
- fifo = fifo_alloc(TRACE_FIFO_SIZE);
-
- old_state = td_bump_runstate(td, TD_SETTING_UP);
-
- td->o.size = 0;
skipped_writes = 0;
do {
- int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));
+ int ret = fread(&t, 1, sizeof(t), f);
- if (ret < 0)
+ if (ferror(f)) {
+ td_verror(td, errno, "read blktrace file");
goto err;
- else if (!ret)
+ } else if (feof(f)) {
break;
- else if (ret < (int) sizeof(t)) {
- log_err("fio: short fifo get\n");
+ } else if (ret < (int) sizeof(t)) {
+ log_err("fio: iolog short read\n");
break;
}
- if (need_swap)
+ if (td->io_log_blktrace_swap)
byteswap_trace(&t);
if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
t.magic & 0xff);
goto err;
}
- ret = discard_pdu(td, fifo, fd, &t);
+ ret = discard_pdu(f, &t);
if (ret < 0) {
- td_verror(td, ret, "blktrace lseek");
- goto err;
- } else if (t.pdu_len != ret) {
- log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
+ td_verror(td, -ret, "blktrace lseek");
goto err;
}
if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
}
}
- handle_trace(td, &t, ios, rw_bs);
- } while (1);
+ if (!queue_trace(td, &t, ios, rw_bs, &cache))
+ continue;
- for_each_file(td, f, i)
- trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
+ if (td->o.read_iolog_chunked) {
+ td->io_log_current++;
+ items_to_fetch--;
+ if (items_to_fetch == 0)
+ break;
+ }
+ } while (1);
- fifo_free(fifo);
- close(fd);
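+	/*
+	 * Note watermarks for the queued entries, used to pace the refill
+	 * of the io log in chunked replay mode.
+	 */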
+ if (td->o.read_iolog_chunked) {
+ td->io_log_highmark = td->io_log_current;
+ td->io_log_checkmark = (td->io_log_highmark + 1) / 2;
+ fio_gettime(&td->io_log_highmark_time, NULL);
+ }
- td_restore_runstate(td, old_state);
+ if (skipped_writes)
+ log_err("fio: %s skips replay of %lu writes due to read-only\n",
+ td->o.name, skipped_writes);
- if (!td->files_index) {
- log_err("fio: did not find replay device(s)\n");
- return false;
+ if (td->o.read_iolog_chunked) {
+		if (td->io_log_current == 0)
+			return false;
+ td->o.td_ddir = TD_DDIR_RW;
+ if ((rw_bs[DDIR_READ] > td->o.max_bs[DDIR_READ] ||
+ rw_bs[DDIR_WRITE] > td->o.max_bs[DDIR_WRITE] ||
+ rw_bs[DDIR_TRIM] > td->o.max_bs[DDIR_TRIM]) &&
+ td->orig_buffer)
+ {
+ td->o.max_bs[DDIR_READ] = max(td->o.max_bs[DDIR_READ], rw_bs[DDIR_READ]);
+ td->o.max_bs[DDIR_WRITE] = max(td->o.max_bs[DDIR_WRITE], rw_bs[DDIR_WRITE]);
+ td->o.max_bs[DDIR_TRIM] = max(td->o.max_bs[DDIR_TRIM], rw_bs[DDIR_TRIM]);
+ io_u_quiesce(td);
+ free_io_mem(td);
+ init_io_u_buffers(td);
+ }
+ return true;
}
+ for_each_file(td, fiof, i)
+ trace_add_open_close_event(td, fiof->fileno, FIO_LOG_CLOSE_FILE);
+
+ fclose(td->io_log_rfile);
+ td->io_log_rfile = NULL;
+
/*
* For stacked devices, we don't always get a COMPLETE event so
* the depth grows to insane values. Limit it to something sane(r).
max_depth = max(depth[i], max_depth);
}
- if (skipped_writes)
- log_err("fio: %s skips replay of %lu writes due to read-only\n",
- td->o.name, skipped_writes);
-
if (!ios[DDIR_READ] && !ios[DDIR_WRITE] && !ios[DDIR_TRIM] &&
!ios[DDIR_SYNC]) {
log_err("fio: found no ios in blktrace data\n");
return false;
- } else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
- td->o.td_ddir = TD_DDIR_READ;
- td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
- } else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
- td->o.td_ddir = TD_DDIR_WRITE;
- td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
- } else {
- td->o.td_ddir = TD_DDIR_RW;
+ }
+
+ td->o.td_ddir = 0;
+ if (ios[DDIR_READ]) {
+ td->o.td_ddir |= TD_DDIR_READ;
td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
+ }
+ if (ios[DDIR_WRITE]) {
+ td->o.td_ddir |= TD_DDIR_WRITE;
td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
+ }
+ if (ios[DDIR_TRIM]) {
+ td->o.td_ddir |= TD_DDIR_TRIM;
td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
}
- /*
- * We need to do direct/raw ios to the device, to avoid getting
- * read-ahead in our way. But only do so if the minimum block size
- * is a multiple of 4k, otherwise we don't know if it's safe to do so.
- */
- if (!fio_option_is_set(&td->o, odirect) && !(td_min_bs(td) & 4095))
- td->o.odirect = 1;
-
/*
* If depth wasn't manually set, use probed depth
*/
return true;
err:
- close(fd);
- fifo_free(fifo);
+	fclose(f);
+	td->io_log_rfile = NULL;
return false;
}
+
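+/*
+ * Propagate a per-file merge parameter (scalar or iteration count) from the
+ * option list into each cursor at offset 'off', using 'def' when the option
+ * was not given. Returns the option count if it does not match nr_logs.
+ */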
+static int init_merge_param_list(fio_fp64_t *vals, struct blktrace_cursor *bcs,
+ int nr_logs, int def, size_t off)
+{
+ int i = 0, len = 0;
+
+ while (len < FIO_IO_U_LIST_MAX_LEN && vals[len].u.f != 0.0)
+ len++;
+
+ if (len && len != nr_logs)
+ return len;
+
+ for (i = 0; i < nr_logs; i++) {
+ int *val = (int *)((char *)&bcs[i] + off);
+ *val = def;
+ if (len)
+ *val = (int)vals[i].u.f;
+ }
+
+ return 0;
+}
+
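+/*
+ * Return the index of the cursor whose pending trace carries the smallest
+ * timestamp, i.e. the next io to emit into the merged output.
+ */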
+static int find_earliest_io(struct blktrace_cursor *bcs, int nr_logs)
+{
+ __u64 time = ~(__u64)0;
+ int idx = 0, i;
+
+ for (i = 0; i < nr_logs; i++) {
+ if (bcs[i].t.time < time) {
+ time = bcs[i].t.time;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
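+/*
+ * Rewind a finished file if it has iterations left; otherwise close it and
+ * compact the cursor array so active files stay contiguous.
+ */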
+static void merge_finish_file(struct blktrace_cursor *bcs, int i, int *nr_logs)
+{
+ bcs[i].iter++;
+ if (bcs[i].iter < bcs[i].nr_iter) {
+ fseek(bcs[i].f, 0, SEEK_SET);
+ return;
+ }
+
+ *nr_logs -= 1;
+
+ /* close file */
+ fclose(bcs[i].f);
+
+ /* keep active files contiguous */
+ memmove(&bcs[i], &bcs[*nr_logs], sizeof(bcs[i]));
+}
+
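+/*
+ * Fill the cursor with its next queue trace, skipping actions that fio
+ * does not replay, and scale the timestamp by the cursor's iteration and
+ * merge_blktrace_scalars setting.
+ */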
+static int read_trace(struct thread_data *td, struct blktrace_cursor *bc)
+{
+ int ret = 0;
+ struct blk_io_trace *t = &bc->t;
+
+read_skip:
+ /* read an io trace */
+	ret = fread(t, 1, sizeof(*t), bc->f);
+	if (ferror(bc->f)) {
+		td_verror(td, errno, "read blktrace file");
+		return -1;
+ } else if (feof(bc->f)) {
+ if (!bc->length)
+ bc->length = bc->t.time;
+ return ret;
+ } else if (ret < (int) sizeof(*t)) {
+ log_err("fio: iolog short read\n");
+ return -1;
+ }
+
+ if (bc->swap)
+ byteswap_trace(t);
+
+ /* skip over actions that fio does not care about */
+ if ((t->action & 0xffff) != __BLK_TA_QUEUE ||
+ t_get_ddir(t) == DDIR_INVAL) {
+ ret = discard_pdu(bc->f, t);
+ if (ret < 0) {
+ td_verror(td, -ret, "blktrace lseek");
+ return ret;
+ }
+ goto read_skip;
+ }
+
+ t->time = (t->time + bc->iter * bc->length) * bc->scalar / 100;
+
+ return ret;
+}
+
+static int write_trace(FILE *fp, struct blk_io_trace *t)
+{
+ /* pdu is not used so just write out only the io trace */
+ t->pdu_len = 0;
+	return fwrite(t, sizeof(*t), 1, fp) == 1 ? 0 : -EIO;
+}
+
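+/*
+ * Merge the blktrace files named in read_iolog into one time-ordered trace
+ * and point the job at the merged file for replay.
+ */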
+int merge_blktrace_iologs(struct thread_data *td)
+{
+ int nr_logs = get_max_str_idx(td->o.read_iolog_file);
+ struct blktrace_cursor *bcs = malloc(sizeof(struct blktrace_cursor) *
+ nr_logs);
+ struct blktrace_cursor *bc;
+ FILE *merge_fp;
+ char *str, *ptr, *name, *merge_buf;
+ int i, ret;
+
+ ret = init_merge_param_list(td->o.merge_blktrace_scalars, bcs, nr_logs,
+ 100, offsetof(struct blktrace_cursor,
+ scalar));
+ if (ret) {
+ log_err("fio: merge_blktrace_scalars(%d) != nr_logs(%d)\n",
+ ret, nr_logs);
+ goto err_param;
+ }
+
+ ret = init_merge_param_list(td->o.merge_blktrace_iters, bcs, nr_logs,
+ 1, offsetof(struct blktrace_cursor,
+ nr_iter));
+ if (ret) {
+ log_err("fio: merge_blktrace_iters(%d) != nr_logs(%d)\n",
+ ret, nr_logs);
+ goto err_param;
+ }
+
+ /* setup output file */
+	merge_fp = fopen(td->o.merge_blktrace_file, "w");
+	if (!merge_fp) {
+		ret = -errno;
+		goto err_param;
+	}
+	merge_buf = malloc(128 * 1024);
+	if (!merge_buf) {
+		ret = -ENOMEM;
+		goto err_out_file;
+	}
+	ret = setvbuf(merge_fp, merge_buf, _IOFBF, 128 * 1024);
+	if (ret)
+		goto err_merge_buf;
+
+ /* setup input files */
+ str = ptr = strdup(td->o.read_iolog_file);
+ nr_logs = 0;
+ for (i = 0; (name = get_next_str(&ptr)) != NULL; i++) {
+ bcs[i].f = fopen(name, "rb");
+ if (!bcs[i].f) {
+ log_err("fio: could not open file: %s\n", name);
+ ret = -errno;
+ free(str);
+ goto err_file;
+ }
+ nr_logs++;
+
+ if (!is_blktrace(name, &bcs[i].swap)) {
+			log_err("fio: file is not a blktrace: %s\n", name);
+			ret = -EINVAL;
+			free(str);
+ goto err_file;
+ }
+
+ ret = read_trace(td, &bcs[i]);
+ if (ret < 0) {
+ free(str);
+ goto err_file;
+ } else if (!ret) {
+ merge_finish_file(bcs, i, &nr_logs);
+ i--;
+ }
+ }
+ free(str);
+
+ /* merge files */
+ while (nr_logs) {
+ i = find_earliest_io(bcs, nr_logs);
+ bc = &bcs[i];
+ /* skip over the pdu */
+ ret = discard_pdu(bc->f, &bc->t);
+ if (ret < 0) {
+ td_verror(td, -ret, "blktrace lseek");
+ goto err_file;
+ }
+
+		ret = write_trace(merge_fp, &bc->t);
+		if (ret < 0)
+			goto err_file;
+
+		ret = read_trace(td, bc);
+		if (ret < 0)
+			goto err_file;
+		else if (!ret)
+			merge_finish_file(bcs, i, &nr_logs);
+ }
+
+ /* set iolog file to read from the newly merged file */
+ td->o.read_iolog_file = td->o.merge_blktrace_file;
+ ret = 0;
+
+err_file:
+ /* cleanup */
+ for (i = 0; i < nr_logs; i++) {
+ fclose(bcs[i].f);
+ }
+err_merge_buf:
+err_out_file:
+	fclose(merge_fp);
+	free(merge_buf);
+err_param:
+ free(bcs);
+
+ return ret;
+}