loop_cache_invalidate(td, f);
}
+ /*
+ * If we reach the end of a mixed read/write run where io_size exceeds
+ * the configured size, reset the position back to the start of the
+ * file and invalidate the cache, if we need to.
+ */
+ if (td_rw(td) && o->io_size > o->size) {
+ if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f)) {
+ f->last_pos[ddir] = f->file_offset;
+ loop_cache_invalidate(td, f);
+ }
+ }
+
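For such jobs the sequential position would otherwise run past the usable range, so the hunk above wraps it back and drops any cached pages. A minimal sketch of the wrap condition, with start_off standing in for what get_start_offset(td, f) returns:

#include <stdint.h>

static uint64_t wrap_seq_pos(uint64_t last_pos, uint64_t io_size,
			     uint64_t start_off, uint64_t file_offset)
{
	/* Once the run has advanced io_size bytes past its starting
	 * offset, snap back to the file's base offset; the real code
	 * also invalidates the page cache at this point. */
	if (last_pos >= io_size + start_off)
		return file_offset;
	return last_pos;
}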
if (f->last_pos[ddir] < f->real_file_size) {
uint64_t pos;
/*
 * See if it's time to fsync/fdatasync/sync_file_range first,
 * and if not then move on to check regular I/Os.
 */
- if (should_fsync(td)) {
+ if (should_fsync(td) && td->last_ddir_issued == DDIR_WRITE) {
if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
!(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
return DDIR_SYNC;
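The extra td->last_ddir_issued == DDIR_WRITE test keeps the periodic fsync from firing right after a read in a mixed workload; only a write should make a sync due. A sketch of the gating, with a stub enum standing in for fio's fio_ddir:

#include <stdbool.h>
#include <stdint.h>

enum ddir_stub { STUB_READ, STUB_WRITE };

static bool periodic_fsync_due(unsigned int fsync_blocks,
			       uint64_t write_issues,
			       enum ddir_stub last_issued)
{
	/* A sync is due every fsync_blocks writes, and only when the
	 * most recently issued I/O was itself a write. */
	return last_issued == STUB_WRITE && fsync_blocks &&
	       write_issues && !(write_issues % fsync_blocks);
}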
if (td->o.zone_mode == ZONE_MODE_ZBD)
ddir = zbd_adjust_ddir(td, io_u, ddir);
- if (td_trimwrite(td)) {
+ if (td_trimwrite(td) && !ddir_sync(ddir)) {
struct fio_file *f = io_u->file;
if (f->last_start[DDIR_WRITE] == f->last_start[DDIR_TRIM])
ddir = DDIR_TRIM;
}
}
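In trimwrite mode each block is trimmed and then written, and comparing last_start[DDIR_WRITE] with last_start[DDIR_TRIM] decides which half is due next; the added !ddir_sync(ddir) guard keeps sync "directions" from being remapped into trims. A sketch of that alternation, under the assumption that last_start[] records the starting offset of the most recent I/O per direction:

enum tw_ddir { TW_WRITE, TW_TRIM, TW_SYNC };

static enum tw_ddir trimwrite_next(enum tw_ddir ddir,
				   unsigned long long last_write_start,
				   unsigned long long last_trim_start)
{
	if (ddir == TW_SYNC)	/* the new guard: never remap a sync */
		return ddir;
	/* Equal start offsets mean the last trim/write pair finished,
	 * so the next block's trim is due; otherwise the write for the
	 * just-trimmed block comes first. */
	return last_write_start == last_trim_start ? TW_TRIM : TW_WRITE;
}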
- if (td->o.fdp)
- fdp_fill_dspec_data(td, io_u);
+ if (td->o.dp_type != FIO_DP_NONE)
+ dp_fill_dspec_data(td, io_u);
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
	io_u, io_u->offset, io_u->buflen,
	(unsigned long long) io_u->file->real_file_size);
if (get_next_trim(td, io_u))
return true;
} else if (!(td->io_hist_len % td->o.trim_backlog) &&
- td->last_ddir != DDIR_READ) {
+ td->last_ddir_completed != DDIR_READ) {
td->trim_batch = td->o.trim_batch;
if (!td->trim_batch)
td->trim_batch = td->o.trim_backlog;
if (td->verify_batch)
get_verify = 1;
else if (!(td->io_hist_len % td->o.verify_backlog) &&
- td->last_ddir != DDIR_READ) {
+ td->last_ddir_completed != DDIR_READ) {
td->verify_batch = td->o.verify_batch;
if (!td->verify_batch)
td->verify_batch = td->o.verify_backlog;
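Both backlog triggers now consult td->last_ddir_completed rather than the old td->last_ddir: whether a trim or verify batch is due depends on what has actually completed, not on what was last issued. A sketch of the shared trigger shape, assuming io_hist_len counts completed entries in the history log:

#include <stdbool.h>

enum hist_ddir { HIST_READ, HIST_WRITE };

static bool backlog_due(unsigned int io_hist_len, unsigned int backlog,
			enum hist_ddir last_completed)
{
	/* Fire once every 'backlog' completions, but never straight
	 * after a read completion. */
	return backlog && !(io_hist_len % backlog) &&
	       last_completed != HIST_READ;
}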
zbd_log_err(td, io_u);
if (td->io_ops->errdetails) {
- char *err = td->io_ops->errdetails(io_u);
+ char *err = td->io_ops->errdetails(td, io_u);
log_err("fio: %s\n", err);
free(err);
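The ->errdetails() callback now receives the thread_data as well as the io_u, so engines can put per-thread context into the message. A hedged engine-side sketch with stub types standing in for fio's own (a real engine gets them from fio.h); the caller logs and frees the returned string, as the free(err) above shows:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

struct thread_data_stub { int thread_number; };
struct io_u_stub { int error; };

static char *stub_errdetails(struct thread_data_stub *td,
			     struct io_u_stub *io_u)
{
	char *msg;

	/* NULL means no extra details; otherwise the caller owns and
	 * frees the buffer. */
	if (asprintf(&msg, "thread %d: io_u error %d",
		     td->thread_number, io_u->error) < 0)
		return NULL;
	return msg;
}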
unsigned long long tnsec;
tnsec = ntime_since(&io_u->start_time, &icd->time);
- add_lat_sample(td, idx, tnsec, bytes, io_u->offset,
- io_u->ioprio, io_u->clat_prio_index);
+ add_lat_sample(td, idx, tnsec, bytes, io_u);
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ddir_rw(idx)) {
if (!td->o.disable_clat) {
- add_clat_sample(td, idx, llnsec, bytes, io_u->offset,
- io_u->ioprio, io_u->clat_prio_index);
+ add_clat_sample(td, idx, llnsec, bytes, io_u);
io_u_mark_latency(td, llnsec);
}
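add_lat_sample() and add_clat_sample() drop the unpacked offset/ioprio/clat_prio_index arguments and take the io_u itself, letting the stat code read those fields directly. A sketch of the callee-side shape with a stub io_u:

#include <stdint.h>

/* Before: add_clat_sample(td, idx, nsec, bytes, io_u->offset,
 *			   io_u->ioprio, io_u->clat_prio_index);
 * After:  add_clat_sample(td, idx, nsec, bytes, io_u);		*/
struct io_u_stub {
	unsigned long long offset;
	unsigned int ioprio;
	unsigned short clat_prio_index;
};

static void add_sample_stub(uint64_t nsec, uint64_t bytes,
			    const struct io_u_stub *io_u)
{
	/* Unpacks offset, ioprio and clat_prio_index from the io_u
	 * instead of taking them as three extra parameters. */
	(void)nsec; (void)bytes;
	(void)io_u->offset; (void)io_u->ioprio; (void)io_u->clat_prio_index;
}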
if (ddir_sync(ddir)) {
if (io_u->error)
goto error;
- td->last_was_sync = true;
if (f) {
f->first_write = -1ULL;
f->last_write = -1ULL;
return;
}
- td->last_was_sync = false;
- td->last_ddir = ddir;
+ td->last_ddir_completed = ddir;
if (!io_u->error && ddir_rw(ddir)) {
unsigned long long bytes = io_u->xfer_buflen - io_u->resid;
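The dedicated last_was_sync flag disappears: td->last_ddir_completed carries the same information and more, since the recorded direction can be classified after the fact. A sketch of the equivalence, with a stub enum in place of fio's fio_ddir:

#include <stdbool.h>

enum cddir_stub { C_READ, C_WRITE, C_TRIM, C_SYNC };

static bool last_completion_was_sync(enum cddir_stub last_completed)
{
	/* Stands in for the dropped flag; fio's real ddir_sync() also
	 * covers fdatasync and sync_file_range. */
	return last_completed == C_SYNC;
}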
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) {
- unsigned long slat_time;
-
- slat_time = ntime_since(&io_u->start_time, &io_u->issue_time);
-
if (td->parent)
td = td->parent;
-
- add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
- io_u->offset, io_u->ioprio);
+ add_slat_sample(td, io_u);
}
}
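io_u_queued() no longer computes the submission latency at the call site; add_slat_sample() takes the io_u and derives the delta from its own timestamps. A sketch of the computation that moved into the helper, using stub types for the two timestamps:

#include <stdint.h>
#include <time.h>

struct io_u_times { struct timespec start_time, issue_time; };

static uint64_t slat_nsec(const struct io_u_times *io_u)
{
	/* Mirrors ntime_since(&io_u->start_time, &io_u->issue_time),
	 * now done inside add_slat_sample() rather than by the caller. */
	int64_t sec  = io_u->issue_time.tv_sec  - io_u->start_time.tv_sec;
	int64_t nsec = io_u->issue_time.tv_nsec - io_u->start_time.tv_nsec;

	return (uint64_t)(sec * 1000000000LL + nsec);
}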