From: Jens Axboe
Date: Tue, 15 Jun 2010 12:25:36 +0000 (+0200)
Subject: Add some support for a verify backlog
X-Git-Tag: fio-1.40~7
X-Git-Url: https://git.kernel.dk/?p=fio.git;a=commitdiff_plain;h=9e144189c6e000df8a797312e43a4913f3b83c9a

Add some support for a verify backlog

Instead of writing everything and then verifying everything, allow a
job to specify incremental verify stages. This can help reduce memory
consumption of fio, since we don't have to store a huge number of
verify entries to be processed when the write phase is complete.

Signed-off-by: Jens Axboe
---

diff --git a/fio.h b/fio.h
index 56aa9b6e..2a762aa2 100644
--- a/fio.h
+++ b/fio.h
@@ -185,6 +185,8 @@ struct thread_options {
 	unsigned int verify_pattern_bytes;
 	unsigned int verify_fatal;
 	unsigned int verify_async;
+	unsigned long long verify_backlog;
+	unsigned int verify_batch;
 	unsigned int use_thread;
 	unsigned int unlink;
 	unsigned int do_disk_util;
@@ -321,6 +323,7 @@ struct thread_data {
 	unsigned int ioprio;
 	unsigned int ioprio_set;
 	unsigned int last_was_sync;
+	enum fio_ddir last_ddir;
 
 	char *mmapfile;
 	int mmapfd;
@@ -335,6 +338,8 @@ struct thread_data {
 	os_random_state_t bsrange_state;
 	os_random_state_t verify_state;
 
+	unsigned int verify_batch;
+
 	int shm_id;
 
 	/*
@@ -410,6 +415,7 @@ struct thread_data {
 	 */
 	struct rb_root io_hist_tree;
 	struct flist_head io_hist_list;
+	unsigned long io_hist_len;
 
 	/*
 	 * For IO replaying
diff --git a/io_u.c b/io_u.c
index 23037f1a..76ad9335 100644
--- a/io_u.c
+++ b/io_u.c
@@ -926,6 +926,22 @@ struct io_u *get_io_u(struct thread_data *td)
 		return NULL;
 	}
 
+	if (td->o.verify_backlog && td->io_hist_len) {
+		int get_verify = 0;
+
+		if (td->verify_batch) {
+			td->verify_batch--;
+			get_verify = 1;
+		} else if (!(td->io_hist_len % td->o.verify_backlog) &&
+			   td->last_ddir != DDIR_READ) {
+			td->verify_batch = td->o.verify_batch;
+			get_verify = 1;
+		}
+
+		if (get_verify && !get_next_verify(td, io_u))
+			goto out;
+	}
+
 	/*
 	 * from a requeue, io_u already setup
 	 */
@@ -1024,6 +1040,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 	}
 
 	td->last_was_sync = 0;
+	td->last_ddir = io_u->ddir;
 
 	if (!io_u->error) {
 		unsigned int bytes = io_u->buflen - io_u->resid;
diff --git a/log.c b/log.c
index 99f20b53..6a99c661 100644
--- a/log.c
+++ b/log.c
@@ -160,12 +160,14 @@ void prune_io_piece_log(struct thread_data *td)
 	while ((n = rb_first(&td->io_hist_tree)) != NULL) {
 		ipo = rb_entry(n, struct io_piece, rb_node);
 		rb_erase(n, &td->io_hist_tree);
+		td->io_hist_len--;
 		free(ipo);
 	}
 
 	while (!flist_empty(&td->io_hist_list)) {
 		ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
 		flist_del(&ipo->list);
+		td->io_hist_len--;
 		free(ipo);
 	}
 }
@@ -201,6 +203,7 @@ void log_io_piece(struct thread_data *td, struct io_u *io_u)
 	    (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
 		INIT_FLIST_HEAD(&ipo->list);
 		flist_add_tail(&ipo->list, &td->io_hist_list);
+		td->io_hist_len++;
 		return;
 	}
 
@@ -222,6 +225,7 @@ restart:
 			p = &(*p)->rb_right;
 		else {
 			assert(ipo->len == __ipo->len);
+			td->io_hist_len--;
 			rb_erase(parent, &td->io_hist_tree);
 			goto restart;
 		}
@@ -229,6 +233,7 @@ restart:
 
 	rb_link_node(&ipo->rb_node, parent, p);
 	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
+	td->io_hist_len++;
 }
 
 void write_iolog_close(struct thread_data *td)
diff --git a/options.c b/options.c
index 2369191c..f470b453 100644
--- a/options.c
+++ b/options.c
@@ -1410,6 +1410,20 @@ static struct fio_option options[FIO_MAX_OPTS] = {
 		.help	= "Number of async verifier threads to use",
 		.parent	= "verify",
 	},
+	{
+		.name	= "verify_backlog",
+		.type	= FIO_OPT_STR_VAL,
+		.off1	= td_var_offset(verify_backlog),
+		.help	= "Verify after this number of blocks are written",
+		.parent	= "verify",
+	},
+	{
+		.name	= "verify_backlog_batch",
+		.type	= FIO_OPT_INT,
+		.off1	= td_var_offset(verify_batch),
+		.help	= "Verify this number of IO blocks",
+		.parent	= "verify_backlog",
+	},
 #ifdef FIO_HAVE_CPU_AFFINITY
 	{
 		.name	= "verify_async_cpus",
diff --git a/verify.c b/verify.c
index c894b600..6932cfcd 100644
--- a/verify.c
+++ b/verify.c
@@ -748,8 +748,10 @@ int get_next_verify(struct thread_data *td, struct io_u *io_u)
 
 		ipo = rb_entry(n, struct io_piece, rb_node);
 		rb_erase(n, &td->io_hist_tree);
+		td->io_hist_len--;
 	} else if (!flist_empty(&td->io_hist_list)) {
 		ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
+		td->io_hist_len--;
 		flist_del(&ipo->list);
 	}