int io_u_quiesce(struct thread_data *td)
{
- int ret = 0, completed = 0;
+ int ret = 0, completed = 0, err = 0;
/*
* We are going to sleep, ensure that we flush anything pending as
* not to skew our latency numbers.
*/
while (td->io_u_in_flight) {
ret = io_u_queued_complete(td, 1);
if (ret > 0)
completed += ret;
else if (ret < 0)
- break;
+ err = ret;
}
if (td->flags & TD_F_REGROW_LOGS)
regrow_logs(td);
if (completed)
return completed;
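+ /*
+ * An error does not abort the drain loop above; if nothing
+ * completed, report the last error seen (zero if none).
+ */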
- return ret;
+ return err;
}
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
if (td->o.io_submit_mode == IO_MODE_INLINE)
io_u_quiesce(td);
+ if (td->o.timeout && ((usec + now) > td->o.timeout)) {
+ /*
+ * If we are already past the timeout, "timeout - now" would
+ * underflow; bail out with DDIR_INVAL instead of sleeping.
+ */
+ if (now > td->o.timeout) {
+ ddir = DDIR_INVAL;
+ return ddir;
+ }
+ usec = td->o.timeout - now;
+ }
usec_sleep(td, usec);
+
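+ /*
+ * Re-check the clock after sleeping: if the run crossed its
+ * timeout while we slept, or termination was requested, return
+ * DDIR_INVAL so the caller stops issuing I/O.
+ */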
+ now = utime_since_now(&td->epoch);
+ if ((td->o.timeout && (now > td->o.timeout)) || td->terminate)
+ ddir = DDIR_INVAL;
+
return ddir;
}
static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
enum fio_ddir ddir = get_rw_ddir(td);
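+
+ /*
+ * With zonemode=zbd the direction may need adjusting; e.g. an
+ * early random read can be turned into a write while the zones
+ * are still empty and hold no data to read back.
+ */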
+ if (td->o.zone_mode == ZONE_MODE_ZBD)
+ ddir = zbd_adjust_ddir(td, io_u, ddir);
+
if (td_trimwrite(td)) {
struct fio_file *f = io_u->file;
if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
set_rw_ddir(td, io_u);
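+
+ /*
+ * DDIR_INVAL is handed back by rate_ddir() once the timeout has
+ * elapsed (or on termination); give up on this io_u so the job
+ * can wind down.
+ */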
+ if (io_u->ddir == DDIR_INVAL) {
+ dprint(FD_IO, "invalid direction received ddir = %d\n", io_u->ddir);
+ return 1;
+ }
/*
* fsync() or fdatasync() or trim etc, we are done
*/
td->latency_qd_low--;
td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
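+ /*
+ * Ramping down ends any streak of successful latency cycles, so
+ * restart the stable-depth counter.
+ */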
+ td->latency_stable_count = 0;
dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
td->latency_qd_low = td->latency_qd;
+ if (td->latency_qd + 1 == td->latency_qd_high) {
+ /*
+ * latency_qd will not increase on lat_target_success(), so
+ * we call this state stable. If we stick with this queue
+ * depth, the final latency is likely lower than
+ * latency_target. Fix this by increasing latency_qd_high
+ * slowly, using a naive heuristic: if we get
+ * lat_target_success() 3 times in a row, increase
+ * latency_qd_high by 1.
+ */
+ if (++td->latency_stable_count >= 3) {
+ td->latency_qd_high++;
+ td->latency_stable_count = 0;
+ }
+ }
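+ /*
+ * Example: with latency_qd = 31 and latency_qd_high = 32, three
+ * successful cycles in a row bump latency_qd_high to 33, so the
+ * next bisection lands on depth 32 instead of stalling at 31.
+ */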
+
/*
* If we haven't failed yet, we double up to a failing value instead
* of bisecting from highest possible queue depth. If we have set
* a limit other than td->o.iodepth, bisect between that.
*/

/*
* Same as last one, we are done. Let it run a latency cycle, so
* we get only the results from the targeted depth.
*/
- if (td->latency_qd == qd) {
+ if (!o->latency_run && td->latency_qd == qd) {
if (td->latency_end_run) {
dprint(FD_RATE, "We are done\n");
td->done = 1;
assert(io_u->flags & IO_U_F_FREE);
io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
IO_U_F_TRIMMED | IO_U_F_BARRIER |
- IO_U_F_VER_LIST);
+ IO_U_F_VER_LIST | IO_U_F_PRIORITY);
io_u->error = 0;
io_u->acct_ddir = -1;
unsigned long long tnsec;
tnsec = ntime_since(&io_u->start_time, &icd->time);
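+ /*
+ * Latency samples are tagged with the io_u's priority, so stats
+ * for high-priority I/O (IO_U_F_PRIORITY, reported by
+ * io_u_is_prio()) can be tracked separately.
+ */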
- add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
+ add_lat_sample(td, idx, tnsec, bytes, io_u->offset, io_u_is_prio(io_u));
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ddir_rw(idx)) {
if (!td->o.disable_clat) {
- add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
+ add_clat_sample(td, idx, llnsec, bytes, io_u->offset, io_u_is_prio(io_u));
io_u_mark_latency(td, llnsec);
}
if (io_u->error)
unlog_io_piece(td, io_u);
else {
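+ /*
+ * Use a release store when clearing IP_F_IN_FLIGHT: earlier
+ * writes to the io piece are then ordered before the flag
+ * update for any acquire-side reader.
+ */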
- io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
- write_barrier();
+ atomic_store_release(&io_u->ipo->flags,
+ io_u->ipo->flags & ~IP_F_IN_FLIGHT);
}
}
td = td->parent;
add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
- io_u->offset);
+ io_u->offset, io_u_is_prio(io_u));
}
}