/*
* For read-only workloads, the program cannot be certain of the
- * last numberio written to a block. Checking of numberio will be done
- * only for workloads that write data.
- * For verify_only, numberio will be checked in the last iteration when
- * the correct state of numberio, that would have been written to each
- * block in a previous run of fio, has been reached.
+ * last numberio written to a block. Checking of numberio will be
+ * done only for workloads that write data. For verify_only,
+ * numberio will be checked in the last iteration when the correct
+ * state of numberio, that would have been written to each block
+ * in a previous run of fio, has been reached.
*/
- if (td_write(td) || td_rw(td))
+ if ((td_write(td) || td_rw(td)) && (td_min_bs(td) == td_max_bs(td)) &&
+ !td->o.time_based)
if (!td->o.verify_only || td->o.loops == 0)
if (vh->numberio != io_u->numberio)
ret = EILSEQ;
/*
* Push IO verification to a separate thread
*/
-int verify_io_u_async(struct thread_data *td, struct io_u *io_u)
+int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
{
- if (io_u->file)
- put_file_log(td, io_u->file);
+ struct io_u *io_u = *io_u_ptr;
pthread_mutex_lock(&td->io_u_lock);
+ if (io_u->file)
+ put_file_log(td, io_u->file);
+
if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
td->cur_depth--;
io_u->flags &= ~IO_U_F_IN_CUR_DEPTH;
}
flist_add_tail(&io_u->verify_list, &td->verify_list);
- io_u->flags |= IO_U_F_FREE_DEF;
+ *io_u_ptr = NULL;
pthread_mutex_unlock(&td->io_u_lock);
pthread_cond_signal(&td->verify_cond);
return EILSEQ;
}
-int verify_io_u(struct thread_data *td, struct io_u *io_u)
+int verify_io_u(struct thread_data *td, struct io_u **io_u_ptr)
{
struct verify_header *hdr;
+ struct io_u *io_u = *io_u_ptr;
unsigned int header_size, hdr_inc, hdr_num = 0;
void *p;
int ret;
if (td->o.verify == VERIFY_NULL || io_u->ddir != DDIR_READ)
return 0;
+ /*
+ * If the IO engine is faking IO (like null), then just pretend
+ * we verified everything.
+ */
+ if (td->io_ops->flags & FIO_FAKEIO)
+ return 0;
+
if (io_u->flags & IO_U_F_TRIMMED) {
ret = verify_trimmed_io_u(td, io_u);
goto done;
done:
if (ret && td->o.verify_fatal)
- td->terminate = 1;
+ fio_mark_td_terminate(td);
return ret;
}
assert(ipo->flags & IP_F_ONRB);
ipo->flags &= ~IP_F_ONRB;
} else if (!flist_empty(&td->io_hist_list)) {
- ipo = flist_entry(td->io_hist_list.next, struct io_piece, list);
+ ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
/*
* Ensure that the associated IO has completed
continue;
while (!flist_empty(&list)) {
- io_u = flist_entry(list.next, struct io_u, verify_list);
- flist_del(&io_u->verify_list);
+ io_u = flist_first_entry(&list, struct io_u, verify_list);
+ flist_del_init(&io_u->verify_list);
+
- ret = verify_io_u(td, io_u);
+ io_u->flags |= IO_U_F_NO_FILE_PUT;
+ ret = verify_io_u(td, &io_u);
put_io_u(td, io_u);
if (!ret)
continue;
if (ret) {
td_verror(td, ret, "async_verify");
if (td->o.verify_fatal)
- td->terminate = 1;
+ fio_mark_td_terminate(td);
}
done: