From 8347239af0a361e160293100bfc053f88bbbf737 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 19 Feb 2009 21:32:12 +0100
Subject: [PATCH] Allow 'norandommap' with verify

We used to turn off verify completely if norandommap was given, since
fio does not track block rewrites. But we can easily track rewrites, as
long as we don't have to track various extent sizes. So relax the
restriction to only cover norandommap with differing block sizes and
adapt log_io_piece() to always use rbtree inserts for cases where we do
need to check for potential overwrites.

Signed-off-by: Jens Axboe
---
 HOWTO  |  6 +++---
 init.c | 13 +++++++++++--
 log.c  | 22 +++++++++++++++++-----
 3 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/HOWTO b/HOWTO
index f256f24f..4ff2d929 100644
--- a/HOWTO
+++ b/HOWTO
@@ -532,9 +532,9 @@ norandommap	Normally fio will cover every block of the file when doing
 		new random offset without looking at past io history. This
 		means that some blocks may not be read or written, and that
 		some blocks may be read/written more than once. This option
-		is mutually exclusive with verify= for that reason, since
-		fio doesn't track potential block rewrites which may alter
-		the calculated checksum for that block.
+		is mutually exclusive with verify= if and only if multiple
+		blocksizes (via bsrange=) are used, since fio only tracks
+		complete rewrites of blocks.
 
 softrandommap	See norandommap. If fio runs with the random block map
 		enabled and it fails to allocate the map, if this option is set it
diff --git a/init.c b/init.c
index 95c282ac..001e5c45 100644
--- a/init.c
+++ b/init.c
@@ -206,6 +206,13 @@
 	return 0;
 }
 
+static int fixed_block_size(struct thread_options *o)
+{
+	return o->min_bs[DDIR_READ] == o->max_bs[DDIR_READ] &&
+		o->min_bs[DDIR_WRITE] == o->max_bs[DDIR_WRITE] &&
+		o->min_bs[DDIR_READ] == o->min_bs[DDIR_WRITE];
+}
+
 /*
  * Lazy way of fixing up options that depend on each other. We could also
  * define option callback handlers, but this is easier.
@@ -269,8 +276,10 @@ static int fixup_options(struct thread_data *td)
 	if (!o->file_size_high)
 		o->file_size_high = o->file_size_low;
 
-	if (o->norandommap && o->verify != VERIFY_NONE) {
-		log_err("fio: norandommap given, verify disabled\n");
+	if (o->norandommap && o->verify != VERIFY_NONE
+	    && !fixed_block_size(o)) {
+		log_err("fio: norandommap given for variable block sizes, "
+				"verify disabled\n");
 		o->verify = VERIFY_NONE;
 	}
 	if (o->bs_unaligned && (o->odirect || td->io_ops->flags & FIO_RAWIO))
diff --git a/log.c b/log.c
index 01e4ad0b..6604c1c8 100644
--- a/log.c
+++ b/log.c
@@ -183,28 +183,40 @@ void log_io_piece(struct thread_data *td, struct io_u *io_u)
 	 *
 	 * For both these cases, just reading back data in the order we
 	 * wrote it out is the fastest.
+	 *
+	 * One exception is if we don't have a random map AND we are doing
+	 * verifies, in that case we need to check for duplicate blocks and
+	 * drop the old one, which we rely on the rb insert/lookup for
+	 * handling.
 	 */
-	if (!td_random(td) || !td->o.overwrite) {
+	if ((!td_random(td) || !td->o.overwrite) &&
+	    (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
 		INIT_FLIST_HEAD(&ipo->list);
 		flist_add_tail(&ipo->list, &td->io_hist_list);
 		return;
 	}
 
 	RB_CLEAR_NODE(&ipo->rb_node);
-	p = &td->io_hist_tree.rb_node;
-	parent = NULL;
 
 	/*
 	 * Sort the entry into the verification list
 	 */
+restart:
+	p = &td->io_hist_tree.rb_node;
+	parent = NULL;
 	while (*p) {
 		parent = *p;
 
 		__ipo = rb_entry(parent, struct io_piece, rb_node);
-		if (ipo->offset <= __ipo->offset)
+		if (ipo->offset < __ipo->offset)
 			p = &(*p)->rb_left;
-		else
+		else if (ipo->offset > __ipo->offset)
 			p = &(*p)->rb_right;
+		else {
+			assert(ipo->len == __ipo->len);
+			rb_erase(parent, &td->io_hist_tree);
+			goto restart;
+		}
 	}
 
 	rb_link_node(&ipo->rb_node, parent, p);
-- 
2.25.1
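
As an illustration of the user-visible effect (this note and the job
file below are not part of the patch): with the change applied, a job
along these lines keeps verify enabled, because bs= specifies a single
fixed block size for both reads and writes. Switching bs=4k to e.g.
bsrange=4k-16k would still trip the fixup_options() check and print
"fio: norandommap given for variable block sizes, verify disabled".
The job name and size= value are made up for the example:

[write-then-verify]
rw=randwrite
bs=4k
size=128m
norandommap
verify=md5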
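
On the mechanism side, the interesting part is the collide-and-replace
insert in log_io_piece(): an offset collision means the block was
rewritten, so the stale io_piece must be dropped or verify would check
the block against old data. Because rb_erase() on fio's bundled
red-black tree can rebalance the tree and invalidate the p/parent
descent pointers, the patch restarts the walk from the root, hence the
restart label. The sketch below mirrors the same logic in a
self-contained way, assuming a plain unbalanced BST instead of the
rbtree; hist_node, hist_insert() and the seq field are invented names,
and the toy tree simply overwrites the colliding node's state in place
rather than erasing and re-inserting:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hist_node {
	uint64_t offset;		/* where the write landed */
	uint64_t len;			/* fixed block size across the job */
	uint32_t seq;			/* which write this was; newest wins */
	struct hist_node *left, *right;
};

static void hist_insert(struct hist_node **root, uint64_t offset,
			uint64_t len, uint32_t seq)
{
	struct hist_node **p = root;

	while (*p) {
		struct hist_node *n = *p;

		if (offset < n->offset)
			p = &n->left;
		else if (offset > n->offset)
			p = &n->right;
		else {
			/*
			 * Rewrite of an already-logged block. A fixed
			 * block size guarantees equal lengths, which is
			 * what the assert() added by the patch checks.
			 */
			assert(len == n->len);
			n->seq = seq;	/* drop the old state */
			return;
		}
	}

	*p = calloc(1, sizeof(**p));
	(*p)->offset = offset;
	(*p)->len = len;
	(*p)->seq = seq;
}

int main(void)
{
	struct hist_node *root = NULL;

	hist_insert(&root, 0, 4096, 1);
	hist_insert(&root, 8192, 4096, 2);
	hist_insert(&root, 0, 4096, 3);	/* block 0 rewritten */

	/* verify must now check block 0 against write 3, not write 1 */
	printf("offset 0 logged as write %u\n", root->seq);
	return 0;
}

Compiled with cc hist.c and run, this prints "offset 0 logged as
write 3": the later rewrite replaced the first entry, which is exactly
the property verify relies on when no random map prevents overlapping
writes.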