unsigned int verify_pattern_bytes;
unsigned int verify_fatal;
unsigned int verify_async;
+ unsigned long long verify_backlog; /* verify pending writes after this many blocks; 0 = disabled (see "verify_backlog" option) */
+ unsigned int verify_batch; /* how many blocks to verify per backlog trigger (see "verify_backlog_batch" option) */
unsigned int use_thread;
unsigned int unlink;
unsigned int do_disk_util;
unsigned int ioprio;
unsigned int ioprio_set;
unsigned int last_was_sync;
+ enum fio_ddir last_ddir; /* direction of last completed io_u; gates backlog verify so a read does not re-trigger it */
char *mmapfile;
int mmapfd;
os_random_state_t bsrange_state;
os_random_state_t verify_state;
+ unsigned int verify_batch; /* runtime countdown: verifies remaining in the current backlog batch (refilled from o.verify_batch) */
+
int shm_id;
/*
*/
struct rb_root io_hist_tree;
struct flist_head io_hist_list;
+ unsigned long io_hist_len; /* number of io_piece entries currently held in io_hist_tree + io_hist_list */
/*
* For IO replaying
return NULL;
}
+ if (td->o.verify_backlog && td->io_hist_len) { /* backlog verify enabled and written blocks are pending */
+ int get_verify = 0;
+
+ if (td->verify_batch) { /* mid-batch: keep issuing verifies until the batch is drained */
+ td->verify_batch--;
+ get_verify = 1;
+ } else if (!(td->io_hist_len % td->o.verify_backlog) && /* history hit a backlog boundary... */
+ td->last_ddir != DDIR_READ) { /* ...and the last I/O was not already a read (avoid re-trigger) */
+ td->verify_batch = td->o.verify_batch; /* start a fresh verify batch */
+ get_verify = 1;
+ }
+
+ if (get_verify && !get_next_verify(td, io_u)) /* a pending verify was found: hand it out instead of new I/O */
+ goto out;
+ }
+
/*
* from a requeue, io_u already setup
*/
}
td->last_was_sync = 0;
+ td->last_ddir = io_u->ddir; /* remember direction for the verify_backlog re-trigger check in get_io_u() */
if (!io_u->error) {
unsigned int bytes = io_u->buflen - io_u->resid;
while ((n = rb_first(&td->io_hist_tree)) != NULL) {
ipo = rb_entry(n, struct io_piece, rb_node);
rb_erase(n, &td->io_hist_tree);
+ td->io_hist_len--; /* keep the history count in sync with tree removal */
free(ipo);
}
while (!flist_empty(&td->io_hist_list)) {
ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
flist_del(&ipo->list);
+ td->io_hist_len--; /* keep the history count in sync with list removal */
free(ipo);
}
}
(file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
INIT_FLIST_HEAD(&ipo->list);
flist_add_tail(&ipo->list, &td->io_hist_list);
+ td->io_hist_len++; /* account for the new list entry */
return;
}
p = &(*p)->rb_right;
else {
assert(ipo->len == __ipo->len);
+ td->io_hist_len--; /* the overwritten piece leaves the tree before reinsertion */
rb_erase(parent, &td->io_hist_tree);
goto restart;
}
rb_link_node(&ipo->rb_node, parent, p);
rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
+ td->io_hist_len++; /* account for the new tree entry */
}
void write_iolog_close(struct thread_data *td)
.help = "Number of async verifier threads to use",
.parent = "verify",
},
+ {
+ .name = "verify_backlog",
+ .type = FIO_OPT_STR_VAL, /* NOTE(review): STR_VAL presumably accepts size suffixes (k/m/g) — confirm */
+ .off1 = td_var_offset(verify_backlog),
+ .help = "Verify after this number of blocks are written",
+ .parent = "verify",
+ },
+ {
+ .name = "verify_backlog_batch",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(verify_batch), /* fills o.verify_batch; runtime countdown lives in td->verify_batch */
+ .help = "Verify this number of IO blocks",
+ .parent = "verify_backlog",
+ },
#ifdef FIO_HAVE_CPU_AFFINITY
{
.name = "verify_async_cpus",
ipo = rb_entry(n, struct io_piece, rb_node);
rb_erase(n, &td->io_hist_tree);
+ td->io_hist_len--; /* verify consumed one piece from the tree */
} else if (!flist_empty(&td->io_hist_list)) {
ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
+ td->io_hist_len--; /* verify consumed one piece from the list */
flist_del(&ipo->list);
}