independent fio invocations. Unfortunately this also breaks
the strict time ordering between multiple device accesses.
+replay_align=int Force alignment of IO offsets and lengths in a trace
+ to this power of 2 value.
+
+replay_scale=int Scale sector offsets down by this factor when
+ replaying traces.
+
write_bw_log=str If given, write a bandwidth log of the jobs in this job
file. Can be used to store data of the bandwidth of the
jobs in their lifetime. The included fio_generate_plots
return last_fileno;
}
+/*
+ * Round the byte count of a blktrace entry up to the configured replay
+ * alignment. No-op when replay_align is unset. replay_align is enforced
+ * to be a power of 2 by the option table (.pow2 = 1), so the
+ * (x + a - 1) & ~(a - 1) round-up mask is valid.
+ */
+static void t_bytes_align(struct thread_options *o, struct blk_io_trace *t)
+{
+	if (!o->replay_align)
+		return;
+
+	/* round up to the next replay_align boundary (power-of-2 mask) */
+	t->bytes = (t->bytes + o->replay_align - 1) & ~(o->replay_align - 1);
+}
+
+/*
+ * Round an io_piece offset down to the configured replay alignment.
+ * No-op when replay_align is unset. Only the offset is touched here;
+ * the transfer length is rounded up separately by t_bytes_align() on
+ * the raw trace entry before the io_piece is built.
+ */
+static void ipo_bytes_align(struct thread_options *o, struct io_piece *ipo)
+{
+	if (!o->replay_align)
+		return;
+
+	/* clear the low bits: replay_align is a power of 2 (.pow2 = 1) */
+	ipo->offset &= ~(o->replay_align - 1);
+}
+
+
/*
* Store blk_io_trace data in an ipo for later retrieval.
*/
init_ipo(ipo);
ipo->offset = offset * bs;
+ if (td->o.replay_scale)
+ ipo->offset = ipo->offset / td->o.replay_scale;
+ ipo_bytes_align(&td->o, ipo);
ipo->len = bytes;
ipo->delay = ttime / 1000;
if (rw)
INIT_FLIST_HEAD(&ipo->list);
ipo->offset = t->sector * bs;
+ if (td->o.replay_scale)
+ ipo->offset = ipo->offset / td->o.replay_scale;
+ ipo_bytes_align(&td->o, ipo);
ipo->len = t->bytes;
ipo->delay = ttime / 1000;
ipo->ddir = DDIR_TRIM;
unsigned long *ios, unsigned int *bs)
{
static unsigned long long last_ttime;
- unsigned long long delay;
+ unsigned long long delay = 0;
if ((t->action & 0xffff) != __BLK_TA_QUEUE)
return;
}
}
+ t_bytes_align(&td->o, t);
+
if (t->action & BLK_TC_ACT(BLK_TC_NOTIFY))
handle_trace_notify(t);
else if (t->action & BLK_TC_ACT(BLK_TC_DISCARD))
o->compress_percentage = le32_to_cpu(top->compress_percentage);
o->compress_chunk = le32_to_cpu(top->compress_chunk);
o->dedupe_percentage = le32_to_cpu(top->dedupe_percentage);
+ o->replay_align = le32_to_cpu(top->replay_align);
+ o->replay_scale = le32_to_cpu(top->replay_scale);
o->trim_backlog = le64_to_cpu(top->trim_backlog);
top->compress_percentage = cpu_to_le32(o->compress_percentage);
top->compress_chunk = cpu_to_le32(o->compress_chunk);
top->dedupe_percentage = cpu_to_le32(o->dedupe_percentage);
+ top->replay_align = cpu_to_le32(o->replay_align);
+ top->replay_scale = cpu_to_le32(o->replay_scale);
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
top->bs[i] = cpu_to_le32(o->bs[i]);
from. Setting \fBreplay_redirect\fR causes all IOPS to be replayed onto the
single specified device regardless of the device it was recorded from.
.TP
+.BI replay_align \fR=\fPint
+Force alignment of IO offsets and lengths in a trace to this power of 2 value.
+.TP
+.BI replay_scale \fR=\fPint
+Scale sector offsets down by this factor when replaying traces.
+.TP
.BI write_bw_log \fR=\fPstr
If given, write a bandwidth log of the jobs in this job file. Can be used to
store data of the bandwidth of the jobs in their lifetime. The included
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IOLOG,
},
+	{
+		.name	= "replay_scale",
+		.lname	= "Replay offset scale factor",
+		.type	= FIO_OPT_INT,
+		.off1	= td_var_offset(replay_scale),
+		.parent	= "read_iolog",
+		.def	= "1",
+		.help	= "Scale offset down by this factor",
+		.category = FIO_OPT_C_IO,
+		.group	= FIO_OPT_G_IOLOG,
+	},
+	{
+		.name	= "replay_align",
+		.lname	= "Replay alignment",
+		.type	= FIO_OPT_INT,
+		.off1	= td_var_offset(replay_align),
+		.parent	= "read_iolog",
+		.help	= "Align offsets to this blocksize",
+		.category = FIO_OPT_C_IO,
+		.group	= FIO_OPT_G_IOLOG,
+		/* reject non-power-of-2 values so align masks stay valid */
+		.pow2	= 1,
+	},
{
.name = "exec_prerun",
.lname = "Pre-execute runnable",
unsigned block_error_hist;
unsigned int skip_bad;
+
+ unsigned int replay_align;
+ unsigned int replay_scale;
};
#define FIO_TOP_STR_MAX 256
uint32_t block_error_hist;
uint32_t skip_bad;
+
+ uint32_t replay_align;
+ uint32_t replay_scale;
} __attribute__((packed));
extern void convert_thread_options_to_cpu(struct thread_options *o, struct thread_options_pack *top);