* and invalidate the cache, if we need to.
*/
if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
- o->time_based) {
+ o->time_based && o->nr_files == 1) {
f->last_pos[ddir] = f->file_offset;
loop_cache_invalidate(td, f);
}
b = offset = -1ULL;
- if (rw_seq) {
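+ /*
+ * For randtrimwrite, the write half of a trim+write pair reuses the
+ * offset of the trim that preceded it, so no new random block is
+ * generated and the random map is left untouched.
+ */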
+ if (td_randtrimwrite(td) && ddir == DDIR_WRITE) {
+ /* don't mark randommap for these writes */
+ io_u_set(td, io_u, IO_U_F_BUSY_OK);
+ offset = f->last_start[DDIR_TRIM];
+ *is_random = true;
+ ret = 0;
+ } else if (rw_seq) {
if (td_random(td)) {
if (should_do_random(td, ddir)) {
ret = get_next_rand_block(td, f, ddir, &b);
log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
ret = 1;
}
+ io_u->verify_offset = io_u->offset;
}
return ret;
return 1;
}
+ /*
+ * For randtrimwrite, we decide whether to issue a trim or a write
+ * based on whether the offsets for the most recent trim and write
+ * operations match. If they don't match that means we just issued a
+ * new trim and the next operation should be a write. If they *do*
+ * match that means we just completed a trim+write pair and the next
+ * command should be a trim.
+ *
+ * This works fine for sequential workloads but for random workloads
+ * it's possible to complete a trim+write pair and then have the next
+ * randomly generated offset match the previous offset. If that happens
+ * we need to alter the offset for the last write operation in order
+ * to ensure that we issue a write operation the next time through.
+ */
+ if (td_randtrimwrite(td) && ddir == DDIR_TRIM &&
+ f->last_start[DDIR_TRIM] == io_u->offset)
+ f->last_start[DDIR_WRITE]--;
+
+ io_u->verify_offset = io_u->offset;
return 0;
}
assert(ddir_rw(ddir));
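+ /*
+ * For the write half of a randtrimwrite pair, reuse the length of the
+ * preceding trim (last_pos - last_start) so the write covers exactly
+ * the region that was just trimmed.
+ */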
+ if (td_randtrimwrite(td) && ddir == DDIR_WRITE) {
+ struct fio_file *f = io_u->file;
+
+ return f->last_pos[DDIR_TRIM] - f->last_start[DDIR_TRIM];
+ }
+
if (td->o.bs_is_seq_rand)
ddir = is_random ? DDIR_WRITE : DDIR_READ;
int io_u_quiesce(struct thread_data *td)
{
- int ret = 0, completed = 0;
+ int ret = 0, completed = 0, err = 0;
/*
* We are going to sleep, ensure that we flush anything pending as
if (ret > 0)
completed += ret;
else if (ret < 0)
- break;
+ err = ret;
}
if (td->flags & TD_F_REGROW_LOGS)
if (completed)
return completed;
- return ret;
+ return err;
}
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
if (td->o.io_submit_mode == IO_MODE_INLINE)
io_u_quiesce(td);
+ if (td->o.timeout && ((usec + now) > td->o.timeout)) {
+ /*
+ * If the timeout has already expired, return DDIR_TIMEOUT;
+ * otherwise clamp the sleep to the time remaining so the
+ * subtraction below cannot go negative.
+ */
+ if (now > td->o.timeout) {
+ ddir = DDIR_TIMEOUT;
+ return ddir;
+ }
+ usec = td->o.timeout - now;
+ }
usec_sleep(td, usec);
+
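+ /*
+ * Re-check after sleeping: if the job timeout has expired or
+ * termination was requested, signal it via DDIR_TIMEOUT.
+ */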
+ now = utime_since_now(&td->epoch);
+ if ((td->o.timeout && (now > td->o.timeout)) || td->terminate)
+ ddir = DDIR_TIMEOUT;
+
return ddir;
}
else
ddir = DDIR_INVAL;
- td->rwmix_ddir = rate_ddir(td, ddir);
+ if (!should_check_rate(td)) {
+ /*
+ * avoid time-consuming call to utime_since_now() if rate checking
+ * isn't being used. This improves IOPS by 50%. See:
+ * https://github.com/axboe/fio/issues/1501#issuecomment-1418327049
+ */
+ td->rwmix_ddir = ddir;
+ } else
+ td->rwmix_ddir = rate_ddir(td, ddir);
return td->rwmix_ddir;
}
{
enum fio_ddir ddir = get_rw_ddir(td);
+ if (td->o.zone_mode == ZONE_MODE_ZBD)
+ ddir = zbd_adjust_ddir(td, io_u, ddir);
+
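+ /*
+ * For trimwrite, trims and writes alternate: matching start offsets
+ * mean the previous trim+write pair completed, so issue a trim next;
+ * otherwise the trim just went out and a write must follow it.
+ */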
if (td_trimwrite(td)) {
struct fio_file *f = io_u->file;
- if (f->last_pos[DDIR_WRITE] == f->last_pos[DDIR_TRIM])
+ if (f->last_start[DDIR_WRITE] == f->last_start[DDIR_TRIM])
ddir = DDIR_TRIM;
else
ddir = DDIR_WRITE;
{
const bool needs_lock = td_async_processing(td);
- zbd_put_io_u(io_u);
+ zbd_put_io_u(td, io_u);
if (td->parent)
td = td->parent;
set_rw_ddir(td, io_u);
+ if (io_u->ddir == DDIR_INVAL || io_u->ddir == DDIR_TIMEOUT) {
+ dprint(FD_IO, "invalid direction received ddir = %d", io_u->ddir);
+ return 1;
+ }
/*
* fsync() or fdatasync() or trim etc, we are done
*/
offset = io_u->offset;
if (td->o.zone_mode == ZONE_MODE_ZBD) {
ret = zbd_adjust_block(td, io_u);
- if (ret == io_u_eof)
+ if (ret == io_u_eof) {
+ dprint(FD_IO, "zbd_adjust_block() returned io_u_eof\n");
return 1;
+ }
}
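+ /*
+ * For NVMe flexible data placement (FDP), fill in the placement
+ * identifier / directive-specific field for this io_u.
+ */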
+ if (td->o.fdp)
+ fdp_fill_dspec_data(td, io_u);
+
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
io_u,
out:
dprint_io_u(io_u, "fill");
+ io_u->verify_offset = io_u->offset;
td->zone_bytes += io_u->buflen;
return 0;
}
break;
case 1 ... 4:
idx = 1;
+ fio_fallthrough;
case 0:
break;
}
break;
case 2 ... 3:
idx = 1;
+ fio_fallthrough;
case 1:
break;
}
break;
case 2 ... 3:
idx = 1;
+ fio_fallthrough;
case 0 ... 1:
break;
}
break;
case 2 ... 3:
idx = 1;
+ fio_fallthrough;
case 0 ... 1:
break;
}
break;
case 2 ... 3:
idx = 1;
+ fio_fallthrough;
case 0 ... 1:
break;
}
if (f && fio_file_open(f) && !fio_file_closing(f)) {
if (td->o.file_service_type == FIO_FSERVICE_SEQ)
goto out;
- if (td->file_service_left--)
+ if (td->file_service_left) {
+ td->file_service_left--;
goto out;
+ }
}
if (td->o.file_service_type == FIO_FSERVICE_RR ||
if (!fill_io_u(td, io_u))
break;
- zbd_put_io_u(io_u);
+ zbd_put_io_u(td, io_u);
put_file_log(td, f);
td_io_close_file(td, f);
io_u->file = NULL;
+
+ if (io_u->ddir == DDIR_TIMEOUT)
+ return 1;
+
if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)
fio_file_reset(td, f);
else {
return 0;
}
-static void lat_fatal(struct thread_data *td, struct io_completion_data *icd,
+static void lat_fatal(struct thread_data *td, struct io_u *io_u, struct io_completion_data *icd,
unsigned long long tnsec, unsigned long long max_nsec)
{
- if (!td->error)
- log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec)\n", tnsec, max_nsec);
+ if (!td->error) {
+ log_err("fio: latency of %llu nsec exceeds specified max (%llu nsec): %s %s %llu %llu\n",
+ tnsec, max_nsec,
+ io_u->file->file_name,
+ io_ddir_name(io_u->ddir),
+ io_u->offset, io_u->buflen);
+ }
td_verror(td, ETIMEDOUT, "max latency exceeded");
icd->error = ETIMEDOUT;
}
td->latency_qd_low--;
td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;
+ td->latency_stable_count = 0;
dprint(FD_RATE, "Ramped down: %d %d %d\n", td->latency_qd_low, td->latency_qd, td->latency_qd_high);
td->latency_qd_low = td->latency_qd;
+ if (td->latency_qd + 1 == td->latency_qd_high) {
+ /*
+ * latency_qd will not increase on lat_target_success(), so the
+ * queue depth is effectively stable here. If we stick with this
+ * queue depth, the final latency is likely lower than
+ * latency_target. Fix this by increasing latency_qd_high slowly,
+ * using a naive heuristic: if we get lat_target_success() 3 times
+ * in a row, increase latency_qd_high by 1.
+ */
+ if (++td->latency_stable_count >= 3) {
+ td->latency_qd_high++;
+ td->latency_stable_count = 0;
+ }
+ }
+
/*
* If we haven't failed yet, we double up to a failing value instead
* of bisecting from highest possible queue depth. If we have set
* Same as last one, we are done. Let it run a latency cycle, so
* we get only the results from the targeted depth.
*/
- if (td->latency_qd == qd) {
+ if (!o->latency_run && td->latency_qd == qd) {
if (td->latency_end_run) {
dprint(FD_RATE, "We are done\n");
td->done = 1;
{
const bool needs_lock = td_async_processing(td);
struct io_u *io_u = NULL;
- int ret;
if (td->stop_io)
return NULL;
__td_io_u_lock(td);
again:
- if (!io_u_rempty(&td->io_u_requeues))
+ if (!io_u_rempty(&td->io_u_requeues)) {
io_u = io_u_rpop(&td->io_u_requeues);
- else if (!queue_full(td)) {
+ io_u->resid = 0;
+ } else if (!queue_full(td)) {
io_u = io_u_qpop(&td->io_u_freelist);
io_u->file = NULL;
io_u_set(td, io_u, IO_U_F_IN_CUR_DEPTH);
io_u->ipo = NULL;
} else if (td_async_processing(td)) {
+ int ret;
/*
* We ran out, wait for async verify threads to finish and
* return one
*/
assert(!(td->flags & TD_F_CHILD));
ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
- assert(ret == 0);
- if (!td->error)
+ if (fio_unlikely(ret != 0)) {
+ td->error = ret; /* pthread_cond_wait() returns the error; errno is not set */
+ } else if (!td->error)
goto again;
}
io_u->xfer_buf = io_u->buf;
io_u->xfer_buflen = io_u->buflen;
+ /*
+ * Remember the issuing context priority. The IO engine may change this.
+ */
+ io_u->ioprio = td->ioprio;
+ io_u->clat_prio_index = 0;
out:
assert(io_u->file);
if (!td_io_prep(td, io_u)) {
io_ddir_name(io_u->ddir),
io_u->offset, io_u->xfer_buflen);
+ zbd_log_err(td, io_u);
+
if (td->io_ops->errdetails) {
char *err = td->io_ops->errdetails(io_u);
unsigned long long tnsec;
tnsec = ntime_since(&io_u->start_time, &icd->time);
- add_lat_sample(td, idx, tnsec, bytes, io_u->offset);
+ add_lat_sample(td, idx, tnsec, bytes, io_u->offset,
+ io_u->ioprio, io_u->clat_prio_index);
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
icd->error = ops->io_u_lat(td, tnsec);
}
- if (td->o.max_latency && tnsec > td->o.max_latency)
- lat_fatal(td, icd, tnsec, td->o.max_latency);
- if (td->o.latency_target && tnsec > td->o.latency_target) {
- if (lat_target_failed(td))
- lat_fatal(td, icd, tnsec, td->o.latency_target);
+ if (ddir_rw(idx)) {
+ if (td->o.max_latency[idx] && tnsec > td->o.max_latency[idx])
+ lat_fatal(td, io_u, icd, tnsec, td->o.max_latency[idx]);
+ if (td->o.latency_target && tnsec > td->o.latency_target) {
+ if (lat_target_failed(td))
+ lat_fatal(td, io_u, icd, tnsec, td->o.latency_target);
+ }
}
}
if (ddir_rw(idx)) {
if (!td->o.disable_clat) {
- add_clat_sample(td, idx, llnsec, bytes, io_u->offset);
+ add_clat_sample(td, idx, llnsec, bytes, io_u->offset,
+ io_u->ioprio, io_u->clat_prio_index);
io_u_mark_latency(td, llnsec);
}
dprint_io_u(io_u, "complete");
assert(io_u->flags & IO_U_F_FLIGHT);
- io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
+ io_u_clear(td, io_u, IO_U_F_FLIGHT | IO_U_F_BUSY_OK | IO_U_F_PATTERN_DONE);
/*
* Mark IO ok to verify
if (io_u->error)
unlog_io_piece(td, io_u);
else {
- io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
- write_barrier();
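+ /*
+ * Release ordering ensures prior updates to the io piece are
+ * visible before IP_F_IN_FLIGHT is cleared, replacing the
+ * explicit write barrier used previously.
+ */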
+ atomic_store_release(&io_u->ipo->flags,
+ io_u->ipo->flags & ~IP_F_IN_FLIGHT);
}
}
if (ddir_sync(ddir)) {
+ if (io_u->error)
+ goto error;
td->last_was_sync = true;
if (f) {
f->first_write = -1ULL;
td->last_ddir = ddir;
if (!io_u->error && ddir_rw(ddir)) {
- unsigned long long bytes = io_u->buflen - io_u->resid;
+ unsigned long long bytes = io_u->xfer_buflen - io_u->resid;
int ret;
+ /*
+ * Make sure we notice short IO from here, and requeue them
+ * appropriately!
+ */
+ if (bytes && io_u->resid) {
+ io_u->xfer_buflen = io_u->resid;
+ io_u->xfer_buf += bytes;
+ io_u->offset += bytes;
+ td->ts.short_io_u[io_u->ddir]++;
+ if (io_u->offset < io_u->file->real_file_size) {
+ requeue_io_u(td, io_u_ptr);
+ return;
+ }
+ }
+
td->io_blocks[ddir]++;
td->io_bytes[ddir] += bytes;
icd->error = ret;
}
} else if (io_u->error) {
+error:
icd->error = io_u->error;
io_u_log_error(td, io_u);
}
}
}
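+/*
+ * Fold the per-completion byte counts into the thread totals. While
+ * verifying, completed reads are accounted as verified bytes instead.
+ */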
+static void io_u_update_bytes_done(struct thread_data *td,
+ struct io_completion_data *icd)
+{
+ int ddir;
+
+ if (td->runstate == TD_VERIFYING) {
+ td->bytes_verified += icd->bytes_done[DDIR_READ];
+ return;
+ }
+
+ for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
+ td->bytes_done[ddir] += icd->bytes_done[ddir];
+}
+
/*
* Complete a single io_u for the sync engines.
*/
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
{
struct io_completion_data icd;
- int ddir;
init_icd(td, &icd, 1);
io_completed(td, &io_u, &icd);
return -1;
}
- for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
- td->bytes_done[ddir] += icd.bytes_done[ddir];
+ io_u_update_bytes_done(td, &icd);
return 0;
}
{
struct io_completion_data icd;
struct timespec *tvp = NULL;
- int ret, ddir;
+ int ret;
struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
return -1;
}
- for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
- td->bytes_done[ddir] += icd.bytes_done[ddir];
+ io_u_update_bytes_done(td, &icd);
return ret;
}
td = td->parent;
add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
- io_u->offset);
+ io_u->offset, io_u->ioprio);
}
}
static struct frand_state *get_buf_state(struct thread_data *td)
{
unsigned int v;
+ unsigned long long i;
if (!td->o.dedupe_percentage)
return &td->buf_state;
v = rand_between(&td->dedupe_state, 1, 100);
if (v <= td->o.dedupe_percentage)
- return &td->buf_state_prev;
+ switch (td->o.dedupe_mode) {
+ case DEDUPE_MODE_REPEAT:
+ /*
+ * The caller advances the returned frand_state, so hand back a
+ * copy of the previous state rather than the state itself;
+ * otherwise a later request for a deduped buffer could end up
+ * generating a unique one instead.
+ */
+ frand_copy(&td->buf_state_ret, &td->buf_state_prev);
+ return &td->buf_state_ret;
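+ /*
+ * Pick one of the pre-seeded working-set states at random so
+ * the buffer dedupes against one of num_unique_pages pages.
+ */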
+ case DEDUPE_MODE_WORKING_SET:
+ i = rand_between(&td->dedupe_working_set_index_state, 0, td->num_unique_pages - 1);
+ frand_copy(&td->buf_state_ret, &td->dedupe_working_set_states[i]);
+ return &td->buf_state_ret;
+ default:
+ log_err("unexpected dedupe mode %u\n", td->o.dedupe_mode);
+ assert(0);
+ }
return &td->buf_state;
}
if (o->compress_percentage || o->dedupe_percentage) {
unsigned int perc = td->o.compress_percentage;
- struct frand_state *rs;
+ struct frand_state *rs = NULL;
unsigned long long left = max_bs;
unsigned long long this_write;
do {
- rs = get_buf_state(td);
+ /*
+ * Buffers are either entirely dedupe-able or not.
+ * If we choose to dedupe, the buffer must undergo the same
+ * manipulation as the original write, which means retracing
+ * the compression steps as well.
+ */
+ if (!rs)
+ rs = get_buf_state(td);
min_write = min(min_write, left);
- if (perc) {
- this_write = min_not_zero(min_write,
- (unsigned long long) td->o.compress_chunk);
+ this_write = min_not_zero(min_write,
+ (unsigned long long) td->o.compress_chunk);
- fill_random_buf_percentage(rs, buf, perc,
- this_write, this_write,
- o->buffer_pattern,
- o->buffer_pattern_bytes);
- } else {
- fill_random_buf(rs, buf, min_write);
- this_write = min_write;
- }
+ fill_random_buf_percentage(rs, buf, perc,
+ this_write, this_write,
+ o->buffer_pattern,
+ o->buffer_pattern_bytes);
buf += this_write;
left -= this_write;
int ret;
if (io_u->ddir == DDIR_SYNC) {
+#ifdef CONFIG_FCNTL_SYNC
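+ /*
+ * On platforms such as macOS, F_FULLFSYNC flushes the drive's
+ * write cache, which a plain fsync() does not guarantee.
+ */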
+ ret = fcntl(io_u->file->fd, F_FULLFSYNC);
+#else
ret = fsync(io_u->file->fd);
+#endif
} else if (io_u->ddir == DDIR_DATASYNC) {
#ifdef CONFIG_FDATASYNC
ret = fdatasync(io_u->file->fd);
return ret;
}
-int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
+int do_io_u_trim(struct thread_data *td, struct io_u *io_u)
{
#ifndef FIO_HAVE_TRIM
io_u->error = EINVAL;
struct fio_file *f = io_u->file;
int ret;
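+ /*
+ * For zoned block devices, let the ZBD code validate and possibly
+ * carry out the trim itself; io_u_completed means it was fully
+ * handled there.
+ */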
+ if (td->o.zone_mode == ZONE_MODE_ZBD) {
+ ret = zbd_do_io_u_trim(td, io_u);
+ if (ret == io_u_completed)
+ return io_u->xfer_buflen;
+ if (ret)
+ goto err;
+ }
+
ret = os_trim(f, io_u->offset, io_u->xfer_buflen);
if (!ret)
return io_u->xfer_buflen;
+err:
io_u->error = ret;
return 0;
#endif