- }
-
- td->done = 1;
- return 1;
-}
-
-/*
- * Free every io_piece still held in the verify write history, covering
- * both storage forms: the sorted rb-tree and the FIFO list. Called to
- * discard the log when it is no longer needed.
- */
-void prune_io_piece_log(struct thread_data *td)
-{
- struct io_piece *ipo;
- struct rb_node *n;
-
- /* Drain the sorted tree: detach each node from the tree first,
-  * then free the containing io_piece. */
- while ((n = rb_first(&td->io_hist_tree)) != NULL) {
- ipo = rb_entry(n, struct io_piece, rb_node);
- rb_erase(n, &td->io_hist_tree);
- free(ipo);
- }
-
- /* Drain the unsorted list the same way: unlink, then free. */
- while (!flist_empty(&td->io_hist_list)) {
- ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
- flist_del(&ipo->list);
- free(ipo);
- }
-}
-
-/*
- * log a successful write, so we can unwind the log for verify
- */
-void log_io_piece(struct thread_data *td, struct io_u *io_u)
-{
- struct rb_node **p, *parent;
- struct io_piece *ipo, *__ipo;
-
- ipo = malloc(sizeof(struct io_piece));
- ipo->file = io_u->file;
- ipo->offset = io_u->offset;
- ipo->len = io_u->buflen;
-
- /*
- * We don't need to sort the entries, if:
- *
- * Sequential writes, or
- * Random writes that lay out the file as it goes along
- *
- * For both these cases, just reading back data in the order we
- * wrote it out is the fastest.
- *
- * One exception is if we don't have a random map AND we are doing
- * verifies, in that case we need to check for duplicate blocks and
- * drop the old one, which we rely on the rb insert/lookup for
- * handling.
- */
- if ((!td_random(td) || !td->o.overwrite) &&
- (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
- INIT_FLIST_HEAD(&ipo->list);
- flist_add_tail(&ipo->list, &td->io_hist_list);
- return;
- }
-
- RB_CLEAR_NODE(&ipo->rb_node);
-
- /*
- * Sort the entry into the verification list
- */
-restart:
- p = &td->io_hist_tree.rb_node;
- parent = NULL;
- while (*p) {
- parent = *p;