break;
}
} else {
- if (ddir_rw_sum(td->bytes_done) + td->o.rw_min_bs > verify_bytes)
+ if (td->bytes_verified + td->o.rw_min_bs > verify_bytes)
break;
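
The exit check above changes because summing td->bytes_done over all data directions conflates every completed I/O in the verify phase with verification progress, while verify_bytes measures only what the write phase laid down; the dedicated td->bytes_verified counter compares like with like. A compilable sketch of the difference, with ddir_rw_sum() modeled on fio's io_ddir.h helper and the byte counts made up for illustration:

    /* sketch, not fio source: contrast the two verify-phase exit checks */
    #include <stdio.h>

    enum { DDIR_READ, DDIR_WRITE, DDIR_TRIM };
    /* same shape as fio's io_ddir.h helper */
    #define ddir_rw_sum(arr) ((arr)[DDIR_READ] + (arr)[DDIR_WRITE] + (arr)[DDIR_TRIM])

    int main(void)
    {
        unsigned long long verify_bytes = 1ULL << 20; /* written by the job */
        unsigned long long rw_min_bs = 4096;
        /* bytes_done can pick up I/O that is not verification progress */
        unsigned long long bytes_done[3] = { 768ULL << 10, 256ULL << 10, 0 };
        unsigned long long bytes_verified = 768ULL << 10;

        /* old check: trips although 256 KiB of written data is still unread */
        printf("old stops: %d\n", ddir_rw_sum(bytes_done) + rw_min_bs > verify_bytes);
        /* new check: keeps verifying until the written bytes are covered */
        printf("new stops: %d\n", bytes_verified + rw_min_bs > verify_bytes);
        return 0;
    }
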
while ((io_u = get_io_u(td)) != NULL) {
if (IS_ERR(io_u)) {
io_u = NULL;
break;
}
[...]
} else if (io_u->ddir == DDIR_WRITE) {
io_u->ddir = DDIR_READ;
+ io_u->numberio = td->verify_read_issues;
+ td->verify_read_issues++;
populate_verify_io_u(td, io_u);
break;
} else {
break;
}
- if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY)
+ if (io_u->ddir == DDIR_WRITE && td->flags & TD_F_DO_VERIFY) {
+ io_u->numberio = td->io_issues[io_u->ddir];
populate_verify_io_u(td, io_u);
+ }
ddir = io_u->ddir;
}
}
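
The two numberio hunks above are halves of one scheme: do_io() stamps each write's sequence number from td->io_issues before populate_verify_io_u() records it in the verify header, and do_verify() gives the replacement reads their own td->verify_read_issues sequence so the verifier knows which number to expect. A standalone sketch of that pairing, with all names invented for illustration:

    /* sketch, not fio source: paired sequence counters for writes and the
     * verify-phase reads that re-check them; all names invented */
    #include <stdint.h>
    #include <stdio.h>

    struct counters {
        uint64_t io_issues_write;    /* in the spirit of td->io_issues[DDIR_WRITE] */
        uint64_t verify_read_issues; /* in the spirit of td->verify_read_issues */
    };

    /* number a write so its verify header records a position in the stream */
    static uint64_t number_write(struct counters *c)
    {
        return c->io_issues_write++;
    }

    /* number a verify read with the sequence value it expects to find */
    static uint64_t number_verify_read(struct counters *c)
    {
        return c->verify_read_issues++;
    }

    int main(void)
    {
        struct counters c = { 0, 0 };

        for (int i = 0; i < 3; i++)
            printf("write stamped numberio=%llu\n",
                   (unsigned long long)number_write(&c));
        /* replaying in issue order, each read's expectation lines up */
        for (int i = 0; i < 3; i++)
            printf("verify read expects numberio=%llu\n",
                   (unsigned long long)number_verify_read(&c));
        return 0;
    }
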
- init_io_u_buffers(td);
+ if (init_io_u_buffers(td))
+ return 1;
if (init_file_completion_logging(td, max_units))
return 1;
/*
* if we may later need to do address alignment, then add any
* possible adjustment here so that we don't cause a buffer
* overflow later. this adjustment may be too much if we get
* lucky and the allocator gives us an aligned address.
*/
- if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+ if (td->o.odirect || td->o.mem_align ||
td_ioengine_flagged(td, FIO_RAWIO))
td->orig_buffer_size += page_mask + td->o.mem_align;
if (data_xfer && allocate_io_mem(td))
return 1;
- if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
+ if (td->o.odirect || td->o.mem_align ||
td_ioengine_flagged(td, FIO_RAWIO))
p = PTR_ALIGN(td->orig_buffer, page_mask) + td->o.mem_align;
else
p = td->orig_buffer;
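
The comment spells out the reasoning: the buffer is padded by page_mask plus mem_align up front so that aligning the start pointer forward can never overrun the allocation, at the cost of wasted pad when the allocator already returns an aligned block. A self-contained sketch of the same idiom, with PTR_ALIGN written out as the usual mask-rounding macro (fio's own definition may differ in detail):

    /* sketch, not fio source: pad an allocation so the start pointer can be
     * aligned forward without overflowing the buffer */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PTR_ALIGN(ptr, mask) \
        ((char *)(((uintptr_t)(ptr) + (mask)) & ~(uintptr_t)(mask)))

    int main(void)
    {
        size_t page_size = 4096, page_mask = page_size - 1, mem_align = 0;
        size_t orig_buffer_size = 64 * 1024;

        /* the adjustment from the hunk above: worst-case alignment slack */
        char *orig_buffer = malloc(orig_buffer_size + page_mask + mem_align);
        if (!orig_buffer)
            return 1;

        char *p = PTR_ALIGN(orig_buffer, page_mask) + mem_align;

        /* zero when the allocator already returned an aligned block, i.e.
         * the "may be too much" case the comment describes */
        printf("prefix skipped for alignment: %zu bytes\n",
               (size_t)(p - orig_buffer));
        free(orig_buffer);
        return 0;
    }
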
if (td_io_init(td))
goto err;
- if (td_ioengine_flagged(td, FIO_SYNCIO) && td->o.iodepth > 1) {
+ if (td_ioengine_flagged(td, FIO_SYNCIO) && td->o.iodepth > 1 &&
+     td->o.io_submit_mode != IO_MODE_OFFLOAD) {
log_info("note: both iodepth >= 1 and synchronous I/O engine "
"are selected, queue depth will be capped at 1\n");
}
}
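
Suppressing the note under io_submit_mode=offload makes sense because offload hands queuing to dedicated worker threads, so a synchronous engine no longer caps the effective depth at 1. A minimal sketch of the resulting predicate, with the enum and names invented for illustration:

    /* sketch, not fio source: when the depth-capped note should fire;
     * enum values and names invented for illustration */
    #include <stdbool.h>

    enum submit_mode { MODE_INLINE, MODE_OFFLOAD };

    static bool warn_depth_capped(bool sync_engine, unsigned int iodepth,
                                  enum submit_mode mode)
    {
        /* offload queues via worker threads, so a synchronous engine no
         * longer forces an effective depth of 1 */
        return sync_engine && iodepth > 1 && mode != MODE_OFFLOAD;
    }

    int main(void)
    {
        /* sync engine at depth 4: warn inline, stay quiet under offload */
        return warn_depth_capped(true, 4, MODE_INLINE) &&
               !warn_depth_capped(true, 4, MODE_OFFLOAD) ? 0 : 1;
    }
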
} while (1);
- if (td_read(td) && td->io_bytes[DDIR_READ])
+ if (td->io_bytes[DDIR_READ] && (td_read(td) ||
+ ((td->flags & TD_F_VER_BACKLOG) && td_write(td))))
update_runtime(td, elapsed_us, DDIR_READ);
if (td_write(td) && td->io_bytes[DDIR_WRITE])
update_runtime(td, elapsed_us, DDIR_WRITE);
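
The widened read-runtime condition covers write jobs running a verify backlog: they accumulate DDIR_READ bytes from the interleaved verify reads even though td_read() is false, so before this change those reads earned no runtime and the reported read bandwidth was skewed. A compilable sketch of the predicate, with the flag bit and struct invented for illustration:

    /* sketch, not fio source: which jobs earn read runtime; the flag bit
     * and struct are invented for illustration */
    #include <stdbool.h>
    #include <stdio.h>

    #define TD_F_VER_BACKLOG (1u << 0) /* stand-in for fio's flag */

    struct job {
        unsigned int flags;
        bool does_reads;               /* like td_read(td) */
        bool does_writes;              /* like td_write(td) */
        unsigned long long read_bytes; /* like td->io_bytes[DDIR_READ] */
    };

    static bool credit_read_runtime(const struct job *td)
    {
        /* a write job with a verify backlog interleaves reads as it runs */
        return td->read_bytes && (td->does_reads ||
            ((td->flags & TD_F_VER_BACKLOG) && td->does_writes));
    }

    int main(void)
    {
        struct job write_with_backlog = {
            .flags = TD_F_VER_BACKLOG,
            .does_reads = false,
            .does_writes = true,
            .read_bytes = 1ULL << 20,
        };

        /* previously gated on does_reads alone, so this printed 0 */
        printf("credit read runtime: %d\n",
               credit_read_runtime(&write_with_backlog));
        return 0;
    }
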