We used to turn off verify completely if norandommap was given, since
fio does not track block rewrites. But we can easily track rewrites,
as long as we don't have to track varying extent sizes. So relax the
restriction so that it only covers norandommap with differing block
sizes, and adapt log_io_piece() to always use rbtree inserts for cases
where we do need to check for potential overwrites.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
new random offset without looking at past io history. This
means that some blocks may not be read or written, and that
some blocks may be read/written more than once. This option
new random offset without looking at past io history. This
means that some blocks may not be read or written, and that
some blocks may be read/written more than once. This option
- is mutually exclusive with verify= for that reason, since
- fio doesn't track potential block rewrites which may alter
- the calculated checksum for that block.
+		is mutually exclusive with verify= if and only if multiple
+		block sizes (via bsrange=) are used, since fio only tracks
+		complete rewrites of blocks.
softrandommap See norandommap. If fio runs with the random block map enabled
and it fails to allocate the map, if this option is set it
softrandommap See norandommap. If fio runs with the random block map enabled
and it fails to allocate the map, if this option is set it
+/*
+ * Return non-zero if every I/O uses one single block size: min_bs must
+ * equal max_bs for both reads and writes, and the read and write sizes
+ * must match each other. Only in that case can fio track complete
+ * block rewrites for verify when no random map is in use.
+ */
+static int fixed_block_size(struct thread_options *o)
+{
+	return o->min_bs[DDIR_READ] == o->max_bs[DDIR_READ] &&
+	       o->min_bs[DDIR_WRITE] == o->max_bs[DDIR_WRITE] &&
+	       o->min_bs[DDIR_READ] == o->min_bs[DDIR_WRITE];
+}
+
/*
* Lazy way of fixing up options that depend on each other. We could also
* define option callback handlers, but this is easier.
/*
* Lazy way of fixing up options that depend on each other. We could also
* define option callback handlers, but this is easier.
if (!o->file_size_high)
o->file_size_high = o->file_size_low;
if (!o->file_size_high)
o->file_size_high = o->file_size_low;
- if (o->norandommap && o->verify != VERIFY_NONE) {
- log_err("fio: norandommap given, verify disabled\n");
+ if (o->norandommap && o->verify != VERIFY_NONE
+ && !fixed_block_size(o)) {
+ log_err("fio: norandommap given for variable block sizes, "
+ "verify disabled\n");
o->verify = VERIFY_NONE;
}
if (o->bs_unaligned && (o->odirect || td->io_ops->flags & FIO_RAWIO))
o->verify = VERIFY_NONE;
}
if (o->bs_unaligned && (o->odirect || td->io_ops->flags & FIO_RAWIO))
*
* For both these cases, just reading back data in the order we
* wrote it out is the fastest.
*
* For both these cases, just reading back data in the order we
* wrote it out is the fastest.
+ *
+ * One exception is if we don't have a random map AND we are doing
+ * verifies, in that case we need to check for duplicate blocks and
+ * drop the old one, which we rely on the rb insert/lookup for
+ * handling.
- if (!td_random(td) || !td->o.overwrite) {
+ if ((!td_random(td) || !td->o.overwrite) &&
+ (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
INIT_FLIST_HEAD(&ipo->list);
flist_add_tail(&ipo->list, &td->io_hist_list);
return;
}
RB_CLEAR_NODE(&ipo->rb_node);
INIT_FLIST_HEAD(&ipo->list);
flist_add_tail(&ipo->list, &td->io_hist_list);
return;
}
RB_CLEAR_NODE(&ipo->rb_node);
- p = &td->io_hist_tree.rb_node;
- parent = NULL;
/*
* Sort the entry into the verification list
*/
/*
* Sort the entry into the verification list
*/
+restart:
+ p = &td->io_hist_tree.rb_node;
+ parent = NULL;
while (*p) {
parent = *p;
__ipo = rb_entry(parent, struct io_piece, rb_node);
while (*p) {
parent = *p;
__ipo = rb_entry(parent, struct io_piece, rb_node);
- if (ipo->offset <= __ipo->offset)
+ if (ipo->offset < __ipo->offset)
+ else if (ipo->offset > __ipo->offset)
+ else {
+ assert(ipo->len == __ipo->len);
+ rb_erase(parent, &td->io_hist_tree);
+ goto restart;
+ }
}
rb_link_node(&ipo->rb_node, parent, p);
}
rb_link_node(&ipo->rb_node, parent, p);