for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &td->o.bssplit[ddir][i];
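+ /* Entries with a zero percentage carry no weight; skip them so they can never be picked. */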
+ if (!bsp->perc)
+ continue;
buflen = bsp->bs;
perc += bsp->perc;
- if (!perc)
- break;
if ((r / perc <= frand_max / 100ULL) &&
io_u_fits(td, io_u, buflen))
break;
power_2 = is_power_of_2(minbs);
if (!td->o.bs_unaligned && power_2)
buflen &= ~(minbs - 1);
- else if (!td->o.bs_unaligned && !power_2)
- buflen -= buflen % minbs;
+ else if (!td->o.bs_unaligned && !power_2)
+ buflen -= buflen % minbs;
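+ /* Never return more than the configured maximum block size. */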
+ if (buflen > maxbs)
+ buflen = maxbs;
} while (!io_u_fits(td, io_u, buflen));
return buflen;
int io_u_quiesce(struct thread_data *td)
{
- int ret = 0, completed = 0;
+ int ret = 0, completed = 0, err = 0;
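+ /* Track the last error in err rather than bailing out, so the count of completed I/Os can still be returned below. */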
/*
* We are going to sleep, ensure that we flush anything pending as
if (ret > 0)
completed += ret;
else if (ret < 0)
- break;
+ err = ret;
}
if (td->flags & TD_F_REGROW_LOGS)
if (completed)
return completed;
- return ret;
+ return err;
}
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
uint64_t now;
assert(ddir_rw(ddir));
- now = utime_since_now(&td->start);
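+ /* Use the fixed job epoch instead of td->start, which can be reset mid-run (assumed rationale for this change). */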
+ now = utime_since_now(&td->epoch);
/*
* if rate_next_io_time is in the past, need to catch up to rate
{
enum fio_ddir ddir = get_rw_ddir(td);
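+ /* With zonemode=zbd the chosen direction may not be usable as-is; let the zbd code adjust it. */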
+ if (td->o.zone_mode == ZONE_MODE_ZBD)
+ ddir = zbd_adjust_ddir(td, io_u, ddir);
+
if (td_trimwrite(td)) {
struct fio_file *f = io_u->file;
if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
{
const bool needs_lock = td_async_processing(td);
- if (io_u->post_submit) {
- io_u->post_submit(io_u, io_u->error == 0);
- io_u->post_submit = NULL;
- }
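+ /* Hand the io_u back to the zoned-device code so it can release its zone; this replaces the generic post_submit hook. */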
+ zbd_put_io_u(io_u);
if (td->parent)
td = td->parent;
/*
* See if it's time to switch to a new zone
*/
- if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
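+ /* Switch zones purely on zone_size consumed; zone_skip no longer gates the switch, it only adds to the offset below. */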
+ if (td->zone_bytes >= td->o.zone_size) {
td->zone_bytes = 0;
f->file_offset += td->o.zone_range + td->o.zone_skip;
if (td->o.zone_mode == ZONE_MODE_STRIDED)
setup_strided_zone_mode(td, io_u);
+ else if (td->o.zone_mode == ZONE_MODE_ZBD)
+ setup_zbd_zone_mode(td, io_u);
/*
* No log, let the seq/rand engine retrieve the next buflen and
if (!fill_io_u(td, io_u))
break;
- if (io_u->post_submit) {
- io_u->post_submit(io_u, false);
- io_u->post_submit = NULL;
- }
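+ /* As in the completion path, let the zbd code release the zone first. */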
+ zbd_put_io_u(io_u);
put_file_log(td, f);
td_io_close_file(td, f);
td->latency_qd_low--;
td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
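+ /* Ramping down ends any streak of successful latency probes; reset the stable counter. */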
+ td->latency_stable_count = 0;
dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
td->latency_qd_low = td->latency_qd;
+ if (td->latency_qd + 1 == td->latency_qd_high) {
+ /*
+ * latency_qd will not increase on lat_target_success(), so
+ * the queue depth is considered stable. If we stick with this
+ * queue depth, the final latency is likely lower than
+ * latency_target. Fix this by increasing latency_qd_high
+ * slowly, using a naive heuristic: if lat_target_success()
+ * fires 3 times in a row, increase latency_qd_high by 1.
+ */
+ if (++td->latency_stable_count >= 3) {
+ td->latency_qd_high++;
+ td->latency_stable_count = 0;
+ }
+ }
+
/*
* If we haven't failed yet, we double up to a failing value instead
* of bisecting from highest possible queue depth. If we have set
* Same as last one, we are done. Let it run a latency cycle, so
* we get only the results from the targeted depth.
*/
- if (td->latency_qd == qd) {
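+ /* With the latency_run option set, keep adjusting the queue depth instead of stopping at the first repeated depth. */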
+ if (!o->latency_run && td->latency_qd == qd) {
if (td->latency_end_run) {
dprint(FD_RATE, "We are done\n");
td->done = 1;
assert(io_u->flags & IO_U_F_FREE);
io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
IO_U_F_TRIMMED | IO_U_F_BARRIER |
- IO_U_F_VER_LIST);
+ IO_U_F_VER_LIST | IO_U_F_PRIORITY);
io_u->error = 0;
io_u->acct_ddir = -1;
assert(!(td->flags & TD_F_CHILD));
ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
assert(ret == 0);
- if (td->error)
- return NULL;
- goto again;
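+ /* On error, fall through rather than returning immediately, so the function exits through the common unlock path below. */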
+ if (!td->error)
+ goto again;
}
if (needs_lock)
unsigned long long tnsec;
tnsec = ntime_since(&io_u->start_time, &icd->time);
- add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
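+ /* Record whether this was a priority I/O so latency samples can be split out by priority. */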
+ add_lat_sample(td, idx, tnsec, bytes, io_u->offset, io_u_is_prio(io_u));
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ddir_rw(idx)) {
if (!td->o.disable_clat) {
- add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
+ add_clat_sample(td, idx, llnsec, bytes, io_u->offset, io_u_is_prio(io_u));
io_u_mark_latency(td, llnsec);
}
td = td->parent;
add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
- io_u->offset);
+ io_u->offset, io_u_is_prio(io_u));
}
}
static int do_sync_file_range(const struct thread_data *td,
struct fio_file *f)
{
- off64_t offset, nbytes;
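+ /* Prefer uint64_t over off64_t, which is not available on all platforms (assumed rationale). */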
+ uint64_t offset, nbytes;
offset = f->first_write;
nbytes = f->last_write - f->first_write;