bytes_total = td->fill_device_size;
}
- if (td->o.zone_size && td->o.zone_skip && bytes_total) {
+ /*
+ * If io_size is set, bytes_total is an exact value that does not need
+ * adjustment.
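+ * With --io_size=1G, for instance, exactly 1G of I/O is performed
+ * regardless of any zone skipping, so the eta needs no correction.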
+ */
+ if (td->o.zone_size && td->o.zone_skip && bytes_total &&
+ !fio_option_is_set(&td->o, io_size)) {
unsigned int nr_zones;
uint64_t zone_bytes;
- zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
- nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
+ /*
+ * Calculate the upper bound of the number of zones that will
+ * be processed, including the bytes skipped between zones. If the
+ * skipped bytes alone would exceed bytes_total (e.g. when --io_size
+ * or --size specifies a small value), use the lower bound instead to
+ * avoid adjusting bytes_total below zero, which would wrap around to
+ * a very large value and yield an incorrect eta.
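+ * For example (illustrative numbers): with zone_size=64k,
+ * zone_skip=64k and bytes_total=32k, the upper bound is
+ * nr_zones = (32k + 128k - 1) / 128k = 1, and subtracting
+ * 1 * zone_skip = 64k would underflow, so the lower bound
+ * 32k / 128k = 0 is used instead.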
+ */
+ zone_bytes = td->o.zone_size + td->o.zone_skip;
+ nr_zones = (bytes_total + zone_bytes - 1) / zone_bytes;
+ if (bytes_total < nr_zones * td->o.zone_skip)
+ nr_zones = bytes_total / zone_bytes;
bytes_total -= nr_zones * td->o.zone_skip;
}
calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
je->rate);
memcpy(&rate_prev_time, &now, sizeof(now));
- add_agg_sample(sample_val(je->rate[DDIR_READ]), DDIR_READ, 0);
- add_agg_sample(sample_val(je->rate[DDIR_WRITE]), DDIR_WRITE, 0);
- add_agg_sample(sample_val(je->rate[DDIR_TRIM]), DDIR_TRIM, 0);
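+ /*
+  * add_agg_sample() grew a fourth argument; pass 0 here, as these
+  * aggregate rate samples do not set it (assumed to be the
+  * per-sample priority bit from the signature change).
+  */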
+ add_agg_sample(sample_val(je->rate[DDIR_READ]), DDIR_READ, 0, 0);
+ add_agg_sample(sample_val(je->rate[DDIR_WRITE]), DDIR_WRITE, 0, 0);
+ add_agg_sample(sample_val(je->rate[DDIR_TRIM]), DDIR_TRIM, 0, 0);
}
disp_time = mtime_since(&disp_prev_time, &now);
void print_status_init(int thr_number)
{
+ struct jobs_eta_packed jep;
+
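+ /*
+  * Verify at build time that the compiler added no padding to
+  * struct jobs_eta: it must match its explicitly packed counterpart
+  * so it can be shipped as raw bytes (as in client/server mode).
+  */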
+ compiletime_assert(sizeof(struct jobs_eta) == sizeof(jep), "jobs_eta");
+
DRD_IGNORE_VAR(__run_str);
__run_str[thr_number] = 'P';
update_condensed_str(__run_str, run_str);