if (max_size > f->real_file_size)
max_size = f->real_file_size;
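+ /*
+ * If zoned I/O is in use, cap the offset range to a single zone's
+ * worth of blocks; the file offset is moved forward when we switch
+ * zones, so generated offsets stay within the current zone.
+ */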
+ if (td->o.zone_range)
+ max_size = td->o.zone_range;
+
max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
if (!max_blocks)
return 0;
if (minbs == maxbs)
return minbs;
+ /*
+ * If we can't satisfy the min block size from here, then fail
+ */
+ if (!io_u_fits(td, io_u, minbs))
+ return 0;
+
if (td->o.use_os_rand)
rand_max = OS_RAND_MAX;
else
/*
* We are going to sleep, ensure that we flush anything pending as
- * not to skew our latency numbers
+ * not to skew our latency numbers.
+ *
+ * Changed to only monitor 'in flight' requests here instead of
+ * td->cur_depth, because td->cur_depth does not accurately represent
+ * I/Os that have actually been submitted to an async engine,
+ * and cur_depth is meaningless for sync engines.
*/
- if (td->cur_depth) {
+ if (td->io_u_in_flight) {
int fio_unused ret;
- ret = io_u_queued_complete(td, td->cur_depth, NULL);
+ ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
}
fio_gettime(&t, NULL);
{
td_io_u_lock(td);
- io_u->flags |= IO_U_F_FREE;
- io_u->flags &= ~IO_U_F_FREE_DEF;
-
- if (io_u->file)
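+ /*
+ * Only drop the file reference if this io_u still owns one; a
+ * deferred-free io_u (IO_U_F_FREE_DEF) is assumed to have had its
+ * file reference put already.
+ */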
+ if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
put_file_log(td, io_u->file);
-
io_u->file = NULL;
+ io_u->flags &= ~IO_U_F_FREE_DEF;
+ io_u->flags |= IO_U_F_FREE;
+
if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
td->cur_depth--;
flist_del_init(&io_u->list);
/*
* See if it's time to switch to a new zone
*/
- if (td->zone_bytes >= td->o.zone_size) {
+ if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
td->zone_bytes = 0;
- io_u->file->last_pos += td->o.zone_skip;
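+ /*
+ * Advance the file offset past the zone just covered plus the
+ * configured skip, and restart I/O from the new zone start.
+ */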
+ io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
+ io_u->file->last_pos = io_u->file->file_offset;
td->io_skip_bytes += td->o.zone_skip;
}
p = io_u->xfer_buf;
boffset = io_u->offset;
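+ /* reset the cached fill length; the buffer is rewritten below */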
+ io_u->buf_filled_len = 0;
for (i = 0; i < nr_blocks; i++) {
/*
if (!td->o.disable_bw)
add_bw_sample(td, idx, bytes, &icd->time);
+
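+ /* also log an IOPS sample at completion time */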
+ add_iops_sample(td, idx, &icd->time);
+}
+
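+/*
+ * How long, in usec, the bytes transferred so far should have taken at
+ * the requested rate. Whole seconds and the remainder are computed
+ * separately so that bytes * 1000000 cannot overflow 64 bits for large
+ * transfer counts.
+ */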
+static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
+{
+ unsigned long long secs, remainder, bps, bytes;
+
+ bytes = td->this_io_bytes[ddir];
+ bps = td->rate_bps[ddir];
+ secs = bytes / bps;
+ remainder = bytes % bps;
+ return remainder * 1000000 / bps + secs * 1000000;
}
static void io_completed(struct thread_data *td, struct io_u *io_u,
int ret;
td->io_blocks[idx]++;
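+ /* per-run block count, used when computing the IOPS samples */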
+ td->this_io_blocks[idx]++;
td->io_bytes[idx] += bytes;
td->this_io_bytes[idx] += bytes;
}
}
- if (ramp_time_over(td)) {
+ if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
+ td->runstate == TD_VERIFYING)) {
account_io_completion(td, io_u, icd, idx, bytes);
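+ /*
+ * The pending sleep is how far ahead of the target rate we are:
+ * the time this much I/O should have taken, minus the time that
+ * has actually elapsed since the start of the run.
+ */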
if (__should_check_rate(td, idx)) {
td->rate_pending_usleep[idx] =
- ((td->this_io_bytes[idx] *
- td->rate_nsec_cycle[idx]) / 1000 -
+ (usec_for_io(td, idx) -
utime_since_now(&td->start));
}
- if (__should_check_rate(td, idx ^ 1))
+ if (__should_check_rate(td, odx))
td->rate_pending_usleep[odx] =
- ((td->this_io_bytes[odx] *
- td->rate_nsec_cycle[odx]) / 1000 -
+ (usec_for_io(td, odx) -
utime_since_now(&td->start));
}
icd->error = io_u->error;
io_u_log_error(td, io_u);
}
- if (td->o.continue_on_error && icd->error &&
- td_non_fatal_error(icd->error)) {
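+ /*
+ * continue_on_error is now a mask of error types; only continue if
+ * the class of this error (read/write/verify) is covered by it.
+ */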
+ if (icd->error && td_non_fatal_error(icd->error) &&
+ (td->o.continue_on_error & td_error_type(io_u->ddir, icd->error))) {
/*
* If there is a non_fatal error, then add to the error count
* and clear all the errors.