summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r-- HOWTO 6
-rw-r--r-- init.c 13
-rw-r--r-- log.c 22
3 files changed, 31 insertions(+), 10 deletions(-)
diff --git a/HOWTO b/HOWTO
index f256f24f..4ff2d929 100644
--- a/HOWTO
+++ b/HOWTO
@@ -532,9 +532,9 @@ norandommap Normally fio will cover every block of the file when doing
new random offset without looking at past io history. This
means that some blocks may not be read or written, and that
some blocks may be read/written more than once. This option
- is mutually exclusive with verify= for that reason, since
- fio doesn't track potential block rewrites which may alter
- the calculated checksum for that block.
+ is mutually exclusive with verify= if and only if multiple
+ blocksizes (via bsrange=) are used, since fio only tracks
+ complete rewrites of blocks.
softrandommap See norandommap. If fio runs with the random block map enabled
and it fails to allocate the map, if this option is set it
diff --git a/init.c b/init.c
index 95c282ac..001e5c45 100644
--- a/init.c
+++ b/init.c
@@ -206,6 +206,13 @@ static int setup_rate(struct thread_data *td)
return 0;
}
+static int fixed_block_size(struct thread_options *o)
+{
+ return o->min_bs[DDIR_READ] == o->max_bs[DDIR_READ] &&
+ o->min_bs[DDIR_WRITE] == o->max_bs[DDIR_WRITE] &&
+ o->min_bs[DDIR_READ] == o->min_bs[DDIR_WRITE];
+}
+
/*
* Lazy way of fixing up options that depend on each other. We could also
* define option callback handlers, but this is easier.
@@ -269,8 +276,10 @@ static int fixup_options(struct thread_data *td)
if (!o->file_size_high)
o->file_size_high = o->file_size_low;
- if (o->norandommap && o->verify != VERIFY_NONE) {
- log_err("fio: norandommap given, verify disabled\n");
+ if (o->norandommap && o->verify != VERIFY_NONE
+ && !fixed_block_size(o)) {
+ log_err("fio: norandommap given for variable block sizes, "
+ "verify disabled\n");
o->verify = VERIFY_NONE;
}
if (o->bs_unaligned && (o->odirect || td->io_ops->flags & FIO_RAWIO))
diff --git a/log.c b/log.c
index 01e4ad0b..6604c1c8 100644
--- a/log.c
+++ b/log.c
@@ -183,28 +183,40 @@ void log_io_piece(struct thread_data *td, struct io_u *io_u)
*
* For both these cases, just reading back data in the order we
* wrote it out is the fastest.
+ *
+ * One exception is if we don't have a random map AND we are doing
+ * verifies, in that case we need to check for duplicate blocks and
+ * drop the old one, which we rely on the rb insert/lookup for
+ * handling.
*/
- if (!td_random(td) || !td->o.overwrite) {
+ if ((!td_random(td) || !td->o.overwrite) &&
+ (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
INIT_FLIST_HEAD(&ipo->list);
flist_add_tail(&ipo->list, &td->io_hist_list);
return;
}
RB_CLEAR_NODE(&ipo->rb_node);
- p = &td->io_hist_tree.rb_node;
- parent = NULL;
/*
* Sort the entry into the verification list
*/
+restart:
+ p = &td->io_hist_tree.rb_node;
+ parent = NULL;
while (*p) {
parent = *p;
__ipo = rb_entry(parent, struct io_piece, rb_node);
- if (ipo->offset <= __ipo->offset)
+ if (ipo->offset < __ipo->offset)
p = &(*p)->rb_left;
- else
+ else if (ipo->offset > __ipo->offset)
p = &(*p)->rb_right;
+ else {
+ assert(ipo->len == __ipo->len);
+ rb_erase(parent, &td->io_hist_tree);
+ goto restart;
+ }
}
rb_link_node(&ipo->rb_node, parent, p);