Add some support for a verify backlog
author Jens Axboe <jaxboe@fusionio.com>
Tue, 15 Jun 2010 12:25:36 +0000 (14:25 +0200)
committer Jens Axboe <jaxboe@fusionio.com>
Tue, 15 Jun 2010 12:25:36 +0000 (14:25 +0200)
Instead of writing everything and then verifying everything,
allow a job to specify incremental verify stages. This can
help reduce fio's memory consumption, since we don't have to
store a huge number of verify entries to be processed once
the write phase is complete.

Two options control this: verify_backlog sets how many blocks
to write before switching to a verify stage, and
verify_backlog_batch sets how many of the pending blocks to
verify in that stage.
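
As a rough usage sketch, a job file might enable the backlog
like this (job name and values are illustrative, not taken
from this commit):

  [write-and-verify]
  rw=write
  bs=4k
  size=1g
  verify=md5
  ; after every 1024 written blocks, verify a batch of 1024
  verify_backlog=1024
  verify_backlog_batch=1024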

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
fio.h
io_u.c
log.c
options.c
verify.c

diff --git a/fio.h b/fio.h
index 56aa9b6e803117dcfdd2e6784a66008fc75f0234..2a762aa2e5427c93adfdc6cdf9426c3e60ec4b6a 100644
--- a/fio.h
+++ b/fio.h
@@ -185,6 +185,8 @@ struct thread_options {
        unsigned int verify_pattern_bytes;
        unsigned int verify_fatal;
        unsigned int verify_async;
+       unsigned long long verify_backlog;
+       unsigned int verify_batch;
        unsigned int use_thread;
        unsigned int unlink;
        unsigned int do_disk_util;
@@ -321,6 +323,7 @@ struct thread_data {
        unsigned int ioprio;
        unsigned int ioprio_set;
        unsigned int last_was_sync;
+       enum fio_ddir last_ddir;
 
        char *mmapfile;
        int mmapfd;
@@ -335,6 +338,8 @@ struct thread_data {
        os_random_state_t bsrange_state;
        os_random_state_t verify_state;
 
+       unsigned int verify_batch;
+
        int shm_id;
 
        /*
@@ -410,6 +415,7 @@ struct thread_data {
         */
        struct rb_root io_hist_tree;
        struct flist_head io_hist_list;
+       unsigned long io_hist_len;
 
        /*
         * For IO replaying
diff --git a/io_u.c b/io_u.c
index 23037f1a8c227114bbcf90b8c0372d8ca5ce108d..76ad9335f0f3f14f503476dc11811539be29bc4b 100644
--- a/io_u.c
+++ b/io_u.c
@@ -926,6 +926,22 @@ struct io_u *get_io_u(struct thread_data *td)
                return NULL;
        }
 
+       if (td->o.verify_backlog && td->io_hist_len) {
+               int get_verify = 0;
+
+               if (td->verify_batch) {
+                       td->verify_batch--;
+                       get_verify = 1;
+               } else if (!(td->io_hist_len % td->o.verify_backlog) &&
+                        td->last_ddir != DDIR_READ) {
+                       td->verify_batch = td->o.verify_batch;
+                       get_verify = 1;
+               }
+
+               if (get_verify && !get_next_verify(td, io_u))
+                       goto out;
+       }
+
        /*
         * from a requeue, io_u already setup
         */
@@ -1024,6 +1040,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
        }
 
        td->last_was_sync = 0;
+       td->last_ddir = io_u->ddir;
 
        if (!io_u->error) {
                unsigned int bytes = io_u->buflen - io_u->resid;
diff --git a/log.c b/log.c
index 99f20b53083b29ae4399827ff677b89e1d16781f..6a99c6611e3ad79aa2763ee44f4656863d1ba824 100644
--- a/log.c
+++ b/log.c
@@ -160,12 +160,14 @@ void prune_io_piece_log(struct thread_data *td)
        while ((n = rb_first(&td->io_hist_tree)) != NULL) {
                ipo = rb_entry(n, struct io_piece, rb_node);
                rb_erase(n, &td->io_hist_tree);
+               td->io_hist_len--;
                free(ipo);
        }
 
        while (!flist_empty(&td->io_hist_list)) {
                ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
                flist_del(&ipo->list);
+               td->io_hist_len--;
                free(ipo);
        }
 }
@@ -201,6 +203,7 @@ void log_io_piece(struct thread_data *td, struct io_u *io_u)
              (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
                INIT_FLIST_HEAD(&ipo->list);
                flist_add_tail(&ipo->list, &td->io_hist_list);
+               td->io_hist_len++;
                return;
        }
 
@@ -222,6 +225,7 @@ restart:
                        p = &(*p)->rb_right;
                else {
                        assert(ipo->len == __ipo->len);
+                       td->io_hist_len--;
                        rb_erase(parent, &td->io_hist_tree);
                        goto restart;
                }
@@ -229,6 +233,7 @@ restart:
 
        rb_link_node(&ipo->rb_node, parent, p);
        rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
+       td->io_hist_len++;
 }
 
 void write_iolog_close(struct thread_data *td)
diff --git a/options.c b/options.c
index 2369191ca748fe3572a3b816c597431c58aac987..f470b45392cb78e8884f5d33c0aaa489293c5616 100644
--- a/options.c
+++ b/options.c
@@ -1410,6 +1410,20 @@ static struct fio_option options[FIO_MAX_OPTS] = {
                .help   = "Number of async verifier threads to use",
                .parent = "verify",
        },
+       {
+               .name   = "verify_backlog",
+               .type   = FIO_OPT_STR_VAL,
+               .off1   = td_var_offset(verify_backlog),
+               .help   = "Verify after this number of blocks are written",
+               .parent = "verify",
+       },
+       {
+               .name   = "verify_backlog_batch",
+               .type   = FIO_OPT_INT,
+               .off1   = td_var_offset(verify_batch),
+               .help   = "Verify this number of IO blocks",
+               .parent = "verify_backlog",
+       },
 #ifdef FIO_HAVE_CPU_AFFINITY
        {
                .name   = "verify_async_cpus",
diff --git a/verify.c b/verify.c
index c894b600525c5969d5ddefb1d9eafe51c6a7ece9..6932cfcd20d5876bd710824613cbf76257d83a10 100644
--- a/verify.c
+++ b/verify.c
@@ -748,8 +748,10 @@ int get_next_verify(struct thread_data *td, struct io_u *io_u)
 
                ipo = rb_entry(n, struct io_piece, rb_node);
                rb_erase(n, &td->io_hist_tree);
+               td->io_hist_len--;
        } else if (!flist_empty(&td->io_hist_list)) {
                ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
+               td->io_hist_len--;
                flist_del(&ipo->list);
        }