loop_cache_invalidate(td, f);
}
+ /*
+ * If we reach the end of a rw io_size-based run, reset the position
+ * back to the configured file offset and invalidate the cache, if
+ * we need to.
+ */
+ if (td_rw(td) && o->io_size > o->size) {
+ if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f)) {
+ f->last_pos[ddir] = f->file_offset;
+ loop_cache_invalidate(td, f);
+ }
+ }
+
if (f->last_pos[ddir] < f->real_file_size) {
uint64_t pos;
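The wrap-around above only applies when the requested total I/O (o->io_size) exceeds the sized region, so a mixed read/write run keeps looping over the same area until enough bytes have been transferred. A standalone sketch of that arithmetic, with illustrative sizes standing in for f->file_offset, the sized region and o->io_size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t start = 0;           /* f->file_offset stand-in */
        const uint64_t region = 1 << 20;    /* sized region (1 MiB) */
        const uint64_t total = 4 << 20;     /* requested I/O (4 MiB) */
        const uint64_t bs = 256 << 10;
        uint64_t pos = start, done = 0;

        while (done < total) {
            printf("issue 0x%llx\n", (unsigned long long)pos);
            pos += bs;
            done += bs;
            /* same reset the hunk above performs via f->file_offset */
            if (pos >= start + region)
                pos = start;
        }
        return 0;
    }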
* make sure the usec calculation below cannot go negative
*/
if (now > td->o.timeout) {
- ddir = DDIR_INVAL;
+ ddir = DDIR_TIMEOUT;
return ddir;
}
usec = td->o.timeout - now;
now = utime_since_now(&td->epoch);
if ((td->o.timeout && (now > td->o.timeout)) || td->terminate)
- ddir = DDIR_INVAL;
+ ddir = DDIR_TIMEOUT;
return ddir;
}
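A dedicated DDIR_TIMEOUT, rather than overloading DDIR_INVAL, lets callers treat runtime expiry as a clean stop instead of an error; a later hunk in this series returns early for exactly this case. A compilable toy of that dispatch, with enum values that are illustrative rather than fio's:

    #include <stdio.h>

    enum ddir { DDIR_READ, DDIR_WRITE, DDIR_TRIM, DDIR_INVAL, DDIR_TIMEOUT };

    /* Stop quietly on timeout, complain on a genuinely bogus direction. */
    static int dispatch(enum ddir d)
    {
        switch (d) {
        case DDIR_TIMEOUT:
            return 1;               /* runtime expired: wind down */
        case DDIR_INVAL:
            fprintf(stderr, "invalid direction\n");
            return -1;
        default:
            return 0;               /* issue the I/O */
        }
    }

    int main(void)
    {
        printf("%d\n", dispatch(DDIR_READ));
        printf("%d\n", dispatch(DDIR_TIMEOUT));
        printf("%d\n", dispatch(DDIR_INVAL));
        return 0;
    }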
* See if it's time to fsync/fdatasync/sync_file_range first,
* and if not then move on to check regular I/Os.
*/
- if (should_fsync(td)) {
+ if (should_fsync(td) && td->last_ddir_issued == DDIR_WRITE) {
if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
!(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
return DDIR_SYNC;
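The extra last_ddir_issued == DDIR_WRITE test keeps a read issued in a mixed workload from triggering the periodic sync: the modulo test alone fires whenever the write count sits on a multiple of fsync_blocks, regardless of what was just issued. An illustrative cadence, assuming fsync_blocks of 4:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int fsync_blocks = 4;
        unsigned int writes = 0, i;

        /* alternate writes and reads; sync only right after a write */
        for (i = 0; i < 16; i++) {
            int is_write = i % 3 != 2;      /* arbitrary mix */

            if (is_write)
                writes++;
            if (is_write && writes && !(writes % fsync_blocks))
                printf("sync after write #%u\n", writes);
        }
        return 0;
    }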
if (td->o.zone_mode == ZONE_MODE_ZBD)
ddir = zbd_adjust_ddir(td, io_u, ddir);
- if (td_trimwrite(td)) {
+ if (td_trimwrite(td) && !ddir_sync(ddir)) {
struct fio_file *f = io_u->file;
if (f->last_start[DDIR_WRITE] == f->last_start[DDIR_TRIM])
ddir = DDIR_TRIM;
fio_file_reset(td, f);
}
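For trimwrite workloads each block is trimmed and then written, so equal last_start values for the two directions mean the previous trim/write pair completed issuing and the next operation flips back to a trim; the new !ddir_sync() guard simply keeps a sync direction returned by zbd_adjust_ddir() from being rewritten. A toy reproduction of the alternation, with hypothetical offsets:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t last_trim = UINT64_MAX, last_write = UINT64_MAX;
        uint64_t off = 0;
        int i;

        for (i = 0; i < 8; i++) {
            if (last_write == last_trim) {
                printf("TRIM  0x%llx\n", (unsigned long long)off);
                last_trim = off;
            } else {
                printf("WRITE 0x%llx\n", (unsigned long long)off);
                last_write = off;
                off += 4096;
            }
        }
        return 0;
    }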
+static int fill_multi_range_io_u(struct thread_data *td, struct io_u *io_u)
+{
+ bool is_random;
+ uint64_t buflen, i = 0;
+ struct trim_range *range;
+ struct fio_file *f = io_u->file;
+ uint8_t *buf;
+
+ buf = io_u->buf;
+ buflen = 0;
+
+ while (i < td->o.num_range) {
+ range = (struct trim_range *)buf;
+ if (get_next_offset(td, io_u, &is_random)) {
+ dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
+ break;
+ }
+
+ io_u->buflen = get_next_buflen(td, io_u, is_random);
+ if (!io_u->buflen) {
+ dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
+ break;
+ }
+
+ if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
+ dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
+ io_u,
+ (unsigned long long) io_u->offset, io_u->buflen,
+ (unsigned long long) io_u->file->real_file_size);
+ break;
+ }
+
+ range->start = io_u->offset;
+ range->len = io_u->buflen;
+ buflen += io_u->buflen;
+ f->last_start[io_u->ddir] = io_u->offset;
+ f->last_pos[io_u->ddir] = io_u->offset + range->len;
+
+ buf += sizeof(struct trim_range);
+ i++;
+
+ if (td_random(td) && file_randommap(td, io_u->file))
+ mark_random_map(td, io_u, io_u->offset, io_u->buflen);
+ dprint_io_u(io_u, "fill");
+ }
+ if (buflen) {
+ /*
+ * Set the buffer length to the overall trim length for this I/O,
+ * and tell the ioengine how many ranges are to be trimmed.
+ */
+ io_u->buflen = buflen;
+ io_u->number_trim = i;
+ return 0;
+ }
+
+ return 1;
+}
+
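Rather than carrying a single offset/length pair, a multi-range trim io_u packs an array of struct trim_range records into io_u->buf, reporting the summed length in io_u->buflen and the record count in io_u->number_trim. A standalone sketch of that layout and of how a consumer might walk it (the struct definition and offsets here are stand-ins, not fio's):

    #include <stdint.h>
    #include <stdio.h>

    struct trim_range {         /* mirrors the start/len pairs packed above */
        uint64_t start;
        uint64_t len;
    };

    int main(void)
    {
        uint8_t buf[4096];      /* stands in for io_u->buf */
        uint8_t *p = buf;
        uint64_t buflen = 0;
        unsigned int i, number_trim = 4;

        /* producer side: pack ranges back to back, summing the lengths */
        for (i = 0; i < number_trim; i++) {
            struct trim_range *r = (struct trim_range *)p;

            r->start = (uint64_t)i * 1048576;   /* hypothetical offsets */
            r->len = 65536;
            buflen += r->len;
            p += sizeof(*r);
        }

        /* consumer side (e.g. an ioengine): walk the same array */
        for (i = 0; i < number_trim; i++) {
            struct trim_range *r = (struct trim_range *)buf + i;

            printf("trim %u: start=0x%llx len=0x%llx\n", i,
                   (unsigned long long)r->start,
                   (unsigned long long)r->len);
        }
        printf("buflen=%llu number_trim=%u\n",
               (unsigned long long)buflen, number_trim);
        return 0;
    }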
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
bool is_random;
set_rw_ddir(td, io_u);
- if (io_u->ddir == DDIR_INVAL) {
+ if (io_u->ddir == DDIR_INVAL || io_u->ddir == DDIR_TIMEOUT) {
dprint(FD_IO, "invalid direction received ddir = %d", io_u->ddir);
return 1;
}
else if (td->o.zone_mode == ZONE_MODE_ZBD)
setup_zbd_zone_mode(td, io_u);
- /*
- * No log, let the seq/rand engine retrieve the next buflen and
- * position.
- */
- if (get_next_offset(td, io_u, &is_random)) {
- dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
- return 1;
- }
+ if (multi_range_trim(td, io_u)) {
+ if (fill_multi_range_io_u(td, io_u))
+ return 1;
+ } else {
+ /*
+ * No log, let the seq/rand engine retrieve the next buflen and
+ * position.
+ */
+ if (get_next_offset(td, io_u, &is_random)) {
+ dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
+ return 1;
+ }
- io_u->buflen = get_next_buflen(td, io_u, is_random);
- if (!io_u->buflen) {
- dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
- return 1;
+ io_u->buflen = get_next_buflen(td, io_u, is_random);
+ if (!io_u->buflen) {
+ dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
+ return 1;
+ }
}
-
offset = io_u->offset;
+
if (td->o.zone_mode == ZONE_MODE_ZBD) {
ret = zbd_adjust_block(td, io_u);
if (ret == io_u_eof) {
}
}
- if (td->o.fdp)
- fdp_fill_dspec_data(td, io_u);
+ if (td->o.dp_type != FIO_DP_NONE)
+ dp_fill_dspec_data(td, io_u);
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
dprint(FD_IO, "io_u %p, off=0x%llx + len=0x%llx exceeds file size=0x%llx\n",
/*
* mark entry before potentially trimming io_u
*/
- if (td_random(td) && file_randommap(td, io_u->file))
+ if (!multi_range_trim(td, io_u) && td_random(td) && file_randommap(td, io_u->file))
io_u->buflen = mark_random_map(td, io_u, offset, io_u->buflen);
out:
- dprint_io_u(io_u, "fill");
+ if (!multi_range_trim(td, io_u))
+ dprint_io_u(io_u, "fill");
io_u->verify_offset = io_u->offset;
td->zone_bytes += io_u->buflen;
return 0;
put_file_log(td, f);
td_io_close_file(td, f);
io_u->file = NULL;
+
+ if (io_u->ddir == DDIR_TIMEOUT)
+ return 1;
+
if (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM)
fio_file_reset(td, f);
else {
if (get_next_trim(td, io_u))
return true;
} else if (!(td->io_hist_len % td->o.trim_backlog) &&
- td->last_ddir != DDIR_READ) {
+ td->last_ddir_completed != DDIR_READ) {
td->trim_batch = td->o.trim_batch;
if (!td->trim_batch)
td->trim_batch = td->o.trim_backlog;
if (td->verify_batch)
get_verify = 1;
else if (!(td->io_hist_len % td->o.verify_backlog) &&
- td->last_ddir != DDIR_READ) {
+ td->last_ddir_completed != DDIR_READ) {
td->verify_batch = td->o.verify_batch;
if (!td->verify_batch)
td->verify_batch = td->o.verify_backlog;
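Both backlogs batch the same way: once the history length hits a multiple of the backlog, and the last completed direction was not a read, a batch of that many trims (or verifies) is queued before regular I/O resumes. A toy run of the cadence with a hypothetical backlog of 4:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int backlog = 4;
        unsigned int hist_len = 0, batch = 0, i;

        for (i = 0; i < 12; i++) {
            if (batch) {
                batch--;
                printf("trim (batch left %u)\n", batch);
                continue;
            }
            hist_len++;         /* a completed write went on the log */
            printf("write #%u\n", hist_len);
            if (!(hist_len % backlog))
                batch = backlog;
        }
        return 0;
    }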
assert(fio_file_open(f));
- if (ddir_rw(io_u->ddir)) {
+ if (ddir_rw(io_u->ddir) && !multi_range_trim(td, io_u)) {
if (!io_u->buflen && !td_ioengine_flagged(td, FIO_NOIO)) {
dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
goto err_put;
io_u->buflen);
} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
!(td->flags & TD_F_COMPRESS) &&
- !(td->flags & TD_F_DO_VERIFY))
+ !(td->flags & TD_F_DO_VERIFY)) {
do_scramble = 1;
+ }
} else if (io_u->ddir == DDIR_READ) {
/*
* Reset the buf_filled parameters so next time if the
io_ddir_name(io_u->ddir),
io_u->offset, io_u->xfer_buflen);
+ zbd_log_err(td, io_u);
+
if (td->io_ops->errdetails) {
- char *err = td->io_ops->errdetails(io_u);
+ char *err = td->io_ops->errdetails(td, io_u);
log_err("fio: %s\n", err);
free(err);
unsigned long long tnsec;
tnsec = ntime_since(&io_u->start_time, &icd->time);
- add_lat_sample(td, idx, tnsec, bytes, io_u->offset,
- io_u->ioprio, io_u->clat_prio_index);
+ add_lat_sample(td, idx, tnsec, bytes, io_u);
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ddir_rw(idx)) {
if (!td->o.disable_clat) {
- add_clat_sample(td, idx, llnsec, bytes, io_u->offset,
- io_u->ioprio, io_u->clat_prio_index);
+ add_clat_sample(td, idx, llnsec, bytes, io_u);
io_u_mark_latency(td, llnsec);
}
if (ddir_sync(ddir)) {
if (io_u->error)
goto error;
- td->last_was_sync = true;
if (f) {
f->first_write = -1ULL;
f->last_write = -1ULL;
return;
}
- td->last_was_sync = false;
- td->last_ddir = ddir;
+ td->last_ddir_completed = ddir;
if (!io_u->error && ddir_rw(ddir)) {
unsigned long long bytes = io_u->xfer_buflen - io_u->resid;
if (td->runstate == TD_VERIFYING) {
td->bytes_verified += icd->bytes_done[DDIR_READ];
- return;
+ if (td_write(td))
+ return;
}
for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
if (!td->o.disable_slat && ramp_time_over(td) && td->o.stats) {
- unsigned long slat_time;
-
- slat_time = ntime_since(&io_u->start_time, &io_u->issue_time);
-
if (td->parent)
td = td->parent;
-
- add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
- io_u->offset, io_u->ioprio);
+ add_slat_sample(td, io_u);
}
}
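The stat helpers (add_lat_sample(), add_clat_sample(), add_slat_sample()) now take the io_u itself and derive offset, priority and, for slat, the time delta internally, instead of every call site unpacking the fields. A standalone sketch of the shape of that refactor; the struct and fields below are stand-ins, not fio's definitions:

    #include <stdint.h>
    #include <stdio.h>

    struct io_u_lite {          /* illustrative subset of struct io_u */
        int ddir;
        uint64_t offset;
        unsigned long long xfer_buflen;
        unsigned long long start_ns, issue_ns;
    };

    /* before: add_slat_sample(td, ddir, slat, len, offset, ioprio)
     * after:  the helper pulls everything from the io_u */
    static void add_slat_sample(struct io_u_lite *io_u)
    {
        unsigned long long slat = io_u->issue_ns - io_u->start_ns;

        printf("slat=%lluns ddir=%d off=0x%llx len=%llu\n", slat,
               io_u->ddir, (unsigned long long)io_u->offset,
               io_u->xfer_buflen);
    }

    int main(void)
    {
        struct io_u_lite u = { 1, 4096, 8192, 100, 2600 };

        add_slat_sample(&u);
        return 0;
    }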