Introduce a for_each_rw_ddir() macro that iterates over the read, write and
trim directions, replacing the open-coded DDIR_READ/DDIR_WRITE/DDIR_TRIM
sequences. This makes it slightly easier to add Zone Append as a fully
fledged I/O type.
Signed-off-by: Alexey Dobriyan (SK hynix) <adobriyan@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
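
[Not part of the patch: a minimal, self-contained sketch of the pattern this
change introduces. The enum and macro below are simplified stand-ins for the
real fio definitions shown in the diff, reproduced here only so the
before/after shape is visible in isolation.]

    #include <stdio.h>

    /* Simplified stand-ins for fio's real definitions. */
    enum fio_ddir { DDIR_READ = 0, DDIR_WRITE, DDIR_TRIM, DDIR_RWDIR_CNT };

    #define for_each_rw_ddir(ddir) \
        for (enum fio_ddir ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)

    int main(void)
    {
        unsigned long long bytes[DDIR_RWDIR_CNT] = { 4096, 8192, 0 };

        /* Before: every direction handled by hand. */
        if (bytes[DDIR_READ])
            printf("read:  %llu\n", bytes[DDIR_READ]);
        if (bytes[DDIR_WRITE])
            printf("write: %llu\n", bytes[DDIR_WRITE]);
        if (bytes[DDIR_TRIM])
            printf("trim:  %llu\n", bytes[DDIR_TRIM]);

        /* After: one loop over all rw directions. */
        for_each_rw_ddir(ddir) {
            if (bytes[ddir])
                printf("ddir %d: %llu\n", (int)ddir, bytes[ddir]);
        }
        return 0;
    }
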
{
bool ret = false;
- if (td->bytes_done[DDIR_READ])
- ret |= __check_min_rate(td, now, DDIR_READ);
- if (td->bytes_done[DDIR_WRITE])
- ret |= __check_min_rate(td, now, DDIR_WRITE);
- if (td->bytes_done[DDIR_TRIM])
- ret |= __check_min_rate(td, now, DDIR_TRIM);
+ for_each_rw_ddir(ddir) {
+ if (td->bytes_done[ddir])
+ ret |= __check_min_rate(td, now, ddir);
+ }
return ret;
}
update_rusage_stat(td);
td->ts.total_run_time = mtime_since_now(&td->epoch);
- td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
- td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
- td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+ for_each_rw_ddir(ddir) {
+ td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+ }
if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
(td->o.verify != VERIFY_NONE && td_write(td)))
struct thread_data *td;
int i, unified_rw_rep;
uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
- unsigned long long io_bytes[DDIR_RWDIR_CNT];
- unsigned long long io_iops[DDIR_RWDIR_CNT];
+ unsigned long long io_bytes[DDIR_RWDIR_CNT] = {};
+ unsigned long long io_iops[DDIR_RWDIR_CNT] = {};
struct timespec now;
static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;
- io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
- io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
bw_avg_time = ULONG_MAX;
unified_rw_rep = 0;
for_each_td(td, i) {
calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
je->rate);
memcpy(&rate_prev_time, &now, sizeof(now));
- add_agg_sample(sample_val(je->rate[DDIR_READ]), DDIR_READ, 0, 0);
- add_agg_sample(sample_val(je->rate[DDIR_WRITE]), DDIR_WRITE, 0, 0);
- add_agg_sample(sample_val(je->rate[DDIR_TRIM]), DDIR_TRIM, 0, 0);
+ for_each_rw_ddir(ddir) {
+ add_agg_sample(sample_val(je->rate[ddir]), ddir, 0, 0);
+ }
}
disp_time = mtime_since(&disp_prev_time, &now);
{
int ret = 0;
- if (td->o.rate[DDIR_READ] || td->o.rate_iops[DDIR_READ])
- ret = __setup_rate(td, DDIR_READ);
- if (td->o.rate[DDIR_WRITE] || td->o.rate_iops[DDIR_WRITE])
- ret |= __setup_rate(td, DDIR_WRITE);
- if (td->o.rate[DDIR_TRIM] || td->o.rate_iops[DDIR_TRIM])
- ret |= __setup_rate(td, DDIR_TRIM);
-
+ for_each_rw_ddir(ddir) {
+ if (td->o.rate[ddir] || td->o.rate_iops[ddir]) {
+ ret |= __setup_rate(td, ddir);
+ }
+ }
return ret;
}
if (td_read(td))
o->overwrite = 1;
- if (!o->min_bs[DDIR_READ])
- o->min_bs[DDIR_READ] = o->bs[DDIR_READ];
- if (!o->max_bs[DDIR_READ])
- o->max_bs[DDIR_READ] = o->bs[DDIR_READ];
- if (!o->min_bs[DDIR_WRITE])
- o->min_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
- if (!o->max_bs[DDIR_WRITE])
- o->max_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
- if (!o->min_bs[DDIR_TRIM])
- o->min_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
- if (!o->max_bs[DDIR_TRIM])
- o->max_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
-
- o->rw_min_bs = min(o->min_bs[DDIR_READ], o->min_bs[DDIR_WRITE]);
- o->rw_min_bs = min(o->min_bs[DDIR_TRIM], o->rw_min_bs);
+ for_each_rw_ddir(ddir) {
+ if (!o->min_bs[ddir])
+ o->min_bs[ddir] = o->bs[ddir];
+ if (!o->max_bs[ddir])
+ o->max_bs[ddir] = o->bs[ddir];
+ }
+
+ o->rw_min_bs = -1;
+ for_each_rw_ddir(ddir) {
+ o->rw_min_bs = min(o->rw_min_bs, o->min_bs[ddir]);
+ }
/*
* For random IO, allow blockalign offset other than min_bs.
*/
- if (!o->ba[DDIR_READ] || !td_random(td))
- o->ba[DDIR_READ] = o->min_bs[DDIR_READ];
- if (!o->ba[DDIR_WRITE] || !td_random(td))
- o->ba[DDIR_WRITE] = o->min_bs[DDIR_WRITE];
- if (!o->ba[DDIR_TRIM] || !td_random(td))
- o->ba[DDIR_TRIM] = o->min_bs[DDIR_TRIM];
+ for_each_rw_ddir(ddir) {
+ if (!o->ba[ddir] || !td_random(td))
+ o->ba[ddir] = o->min_bs[ddir];
+ }
if ((o->ba[DDIR_READ] != o->min_bs[DDIR_READ] ||
o->ba[DDIR_WRITE] != o->min_bs[DDIR_WRITE] ||
log_err("fio: rate and rate_iops are mutually exclusive\n");
ret |= 1;
}
- if ((o->rate[DDIR_READ] && (o->rate[DDIR_READ] < o->ratemin[DDIR_READ])) ||
- (o->rate[DDIR_WRITE] && (o->rate[DDIR_WRITE] < o->ratemin[DDIR_WRITE])) ||
- (o->rate[DDIR_TRIM] && (o->rate[DDIR_TRIM] < o->ratemin[DDIR_TRIM])) ||
- (o->rate_iops[DDIR_READ] && (o->rate_iops[DDIR_READ] < o->rate_iops_min[DDIR_READ])) ||
- (o->rate_iops[DDIR_WRITE] && (o->rate_iops[DDIR_WRITE] < o->rate_iops_min[DDIR_WRITE])) ||
- (o->rate_iops[DDIR_TRIM] && (o->rate_iops[DDIR_TRIM] < o->rate_iops_min[DDIR_TRIM]))) {
- log_err("fio: minimum rate exceeds rate\n");
- ret |= 1;
+ for_each_rw_ddir(ddir) {
+ if ((o->rate[ddir] && (o->rate[ddir] < o->ratemin[ddir])) ||
+ (o->rate_iops[ddir] && (o->rate_iops[ddir] < o->rate_iops_min[ddir]))) {
+ log_err("fio: minimum rate exceeds rate, ddir %d\n", +ddir);
+ ret |= 1;
+ }
}
if (!o->timeout && o->time_based) {
DDIR_RWDIR_SYNC_CNT = 4,
};
+#define for_each_rw_ddir(ddir) for (enum fio_ddir ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
+
static inline const char *io_ddir_name(enum fio_ddir ddir)
{
static const char *name[] = { "read", "write", "trim", "sync",
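
[Illustration only, not part of the patch: why the macro helps the stated
Zone Append goal. DDIR_APPEND below is a hypothetical name, and this
simplified enum ignores the sync/wait entries the real fio enum carries after
the rw directions; it only shows that any new direction placed before
DDIR_RWDIR_CNT is picked up by every loop converted in this patch without
touching the call sites again.]

    #include <stdio.h>

    enum fio_ddir {
        DDIR_READ = 0,
        DDIR_WRITE,
        DDIR_TRIM,
        DDIR_APPEND,        /* hypothetical new rw direction */
        DDIR_RWDIR_CNT,     /* still "one past the last rw ddir" */
    };

    #define for_each_rw_ddir(ddir) \
        for (enum fio_ddir ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)

    int main(void)
    {
        /* Loops written with the macro now visit DDIR_APPEND too. */
        for_each_rw_ddir(ddir)
            printf("visiting ddir %d\n", (int)ddir);
        return 0;
    }
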
if (strlen(ts->description))
log_buf(out, " Description : [%s]\n", ts->description);
- if (ts->io_bytes[DDIR_READ])
- show_ddir_status(rs, ts, DDIR_READ, out);
- if (ts->io_bytes[DDIR_WRITE])
- show_ddir_status(rs, ts, DDIR_WRITE, out);
- if (ts->io_bytes[DDIR_TRIM])
- show_ddir_status(rs, ts, DDIR_TRIM, out);
+ for_each_rw_ddir(ddir) {
+ if (ts->io_bytes[ddir])
+ show_ddir_status(rs, ts, ddir, out);
+ }
show_latencies(ts, out);
for_each_td(td, i) {
td->update_rusage = 1;
- td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
- td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
- td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+ for_each_rw_ddir(ddir) {
+ td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+ }
td->ts.total_run_time = mtime_since(&td->epoch, &ts);
rt[i] = mtime_since(&td->start, &ts);