fio: add for_each_rw_ddir() macro
author     Alexey Dobriyan <adobriyan@gmail.com>
           Thu, 13 Aug 2020 16:33:07 +0000 (19:33 +0300)
committer  Jens Axboe <axboe@kernel.dk>
           Mon, 17 Aug 2020 04:01:22 +0000 (21:01 -0700)
Make it slightly easier to add Zone Append as a fully fledged I/O type.

Signed-off-by: Alexey Dobriyan (SK hynix) <adobriyan@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
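
For context, the new helper (added in io_ddir.h below) is a plain counted loop over the read/write/trim data directions. The following is a minimal standalone sketch of the pattern, not fio source: it uses a reduced fio_ddir enum in which DDIR_RWDIR_CNT is the terminal enumerator, whereas in fio itself it is a separate constant defined alongside the sync directions visible in the io_ddir.h hunk below.

    /* Minimal standalone sketch (not fio source): a reduced fio_ddir enum and
     * the same loop macro, iterating the three read/write/trim directions. */
    #include <stdio.h>

    enum fio_ddir { DDIR_READ = 0, DDIR_WRITE, DDIR_TRIM, DDIR_RWDIR_CNT };

    #define for_each_rw_ddir(ddir) \
            for (enum fio_ddir ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)

    int main(void)
    {
            unsigned long long bytes_done[DDIR_RWDIR_CNT] = { 4096, 0, 512 };

            /* One loop replaces three copy-pasted DDIR_READ/WRITE/TRIM
             * branches, as in the check_min_rate() hunk below. */
            for_each_rw_ddir(ddir) {
                    if (bytes_done[ddir])
                            printf("%d: %llu bytes\n", (int)ddir, bytes_done[ddir]);
            }
            return 0;
    }
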
backend.c
eta.c
init.c
io_ddir.h
stat.c

diff --git a/backend.c b/backend.c
index 0e454cdd1e9a4f2979320e7b913cc5f242d4b362..a4367672d68ce80c88dc828245c2467aa1f6917c 100644 (file)
--- a/backend.c
+++ b/backend.c
@@ -223,12 +223,10 @@ static bool check_min_rate(struct thread_data *td, struct timespec *now)
 {
        bool ret = false;
 
-       if (td->bytes_done[DDIR_READ])
-               ret |= __check_min_rate(td, now, DDIR_READ);
-       if (td->bytes_done[DDIR_WRITE])
-               ret |= __check_min_rate(td, now, DDIR_WRITE);
-       if (td->bytes_done[DDIR_TRIM])
-               ret |= __check_min_rate(td, now, DDIR_TRIM);
+       for_each_rw_ddir(ddir) {
+               if (td->bytes_done[ddir])
+                       ret |= __check_min_rate(td, now, ddir);
+       }
 
        return ret;
 }
@@ -1876,9 +1874,9 @@ static void *thread_main(void *data)
 
        update_rusage_stat(td);
        td->ts.total_run_time = mtime_since_now(&td->epoch);
-       td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
-       td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
-       td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+       for_each_rw_ddir(ddir) {
+               td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+       }
 
        if (td->o.verify_state_save && !(td->flags & TD_F_VSTATE_SAVED) &&
            (td->o.verify != VERIFY_NONE && td_write(td)))
diff --git a/eta.c b/eta.c
index 13f61ba43b5b9c1c3788b4be33fa88e22e6fef71..e8c727809e39977dc2886a4e7f2730142db38eb5 100644 (file)
--- a/eta.c
+++ b/eta.c
@@ -383,8 +383,8 @@ bool calc_thread_status(struct jobs_eta *je, int force)
        struct thread_data *td;
        int i, unified_rw_rep;
        uint64_t rate_time, disp_time, bw_avg_time, *eta_secs;
-       unsigned long long io_bytes[DDIR_RWDIR_CNT];
-       unsigned long long io_iops[DDIR_RWDIR_CNT];
+       unsigned long long io_bytes[DDIR_RWDIR_CNT] = {};
+       unsigned long long io_iops[DDIR_RWDIR_CNT] = {};
        struct timespec now;
 
        static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
@@ -413,8 +413,6 @@ bool calc_thread_status(struct jobs_eta *je, int force)
 
        je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;
 
-       io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
-       io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
        bw_avg_time = ULONG_MAX;
        unified_rw_rep = 0;
        for_each_td(td, i) {
@@ -509,9 +507,9 @@ bool calc_thread_status(struct jobs_eta *je, int force)
                calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
                                je->rate);
                memcpy(&rate_prev_time, &now, sizeof(now));
-               add_agg_sample(sample_val(je->rate[DDIR_READ]), DDIR_READ, 0, 0);
-               add_agg_sample(sample_val(je->rate[DDIR_WRITE]), DDIR_WRITE, 0, 0);
-               add_agg_sample(sample_val(je->rate[DDIR_TRIM]), DDIR_TRIM, 0, 0);
+               for_each_rw_ddir(ddir) {
+                       add_agg_sample(sample_val(je->rate[ddir]), ddir, 0, 0);
+               }
        }
 
        disp_time = mtime_since(&disp_prev_time, &now);
diff --git a/init.c b/init.c
index 84325f1e86fc800cdef58140d677004b4eacf9f1..6ff7c68d72de1329ceb2ab8e84709dcd3519f1e2 100644 (file)
--- a/init.c
+++ b/init.c
@@ -564,13 +564,11 @@ static int setup_rate(struct thread_data *td)
 {
        int ret = 0;
 
-       if (td->o.rate[DDIR_READ] || td->o.rate_iops[DDIR_READ])
-               ret = __setup_rate(td, DDIR_READ);
-       if (td->o.rate[DDIR_WRITE] || td->o.rate_iops[DDIR_WRITE])
-               ret |= __setup_rate(td, DDIR_WRITE);
-       if (td->o.rate[DDIR_TRIM] || td->o.rate_iops[DDIR_TRIM])
-               ret |= __setup_rate(td, DDIR_TRIM);
-
+       for_each_rw_ddir(ddir) {
+               if (td->o.rate[ddir] || td->o.rate_iops[ddir]) {
+                       ret |= __setup_rate(td, ddir);
+               }
+       }
        return ret;
 }
 
@@ -662,31 +660,25 @@ static int fixup_options(struct thread_data *td)
        if (td_read(td))
                o->overwrite = 1;
 
-       if (!o->min_bs[DDIR_READ])
-               o->min_bs[DDIR_READ] = o->bs[DDIR_READ];
-       if (!o->max_bs[DDIR_READ])
-               o->max_bs[DDIR_READ] = o->bs[DDIR_READ];
-       if (!o->min_bs[DDIR_WRITE])
-               o->min_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
-       if (!o->max_bs[DDIR_WRITE])
-               o->max_bs[DDIR_WRITE] = o->bs[DDIR_WRITE];
-       if (!o->min_bs[DDIR_TRIM])
-               o->min_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
-       if (!o->max_bs[DDIR_TRIM])
-               o->max_bs[DDIR_TRIM] = o->bs[DDIR_TRIM];
-
-       o->rw_min_bs = min(o->min_bs[DDIR_READ], o->min_bs[DDIR_WRITE]);
-       o->rw_min_bs = min(o->min_bs[DDIR_TRIM], o->rw_min_bs);
+       for_each_rw_ddir(ddir) {
+               if (!o->min_bs[ddir])
+                       o->min_bs[ddir] = o->bs[ddir];
+               if (!o->max_bs[ddir])
+                       o->max_bs[ddir] = o->bs[ddir];
+       }
+
+       o->rw_min_bs = -1;
+       for_each_rw_ddir(ddir) {
+               o->rw_min_bs = min(o->rw_min_bs, o->min_bs[ddir]);
+       }
 
        /*
         * For random IO, allow blockalign offset other than min_bs.
         */
-       if (!o->ba[DDIR_READ] || !td_random(td))
-               o->ba[DDIR_READ] = o->min_bs[DDIR_READ];
-       if (!o->ba[DDIR_WRITE] || !td_random(td))
-               o->ba[DDIR_WRITE] = o->min_bs[DDIR_WRITE];
-       if (!o->ba[DDIR_TRIM] || !td_random(td))
-               o->ba[DDIR_TRIM] = o->min_bs[DDIR_TRIM];
+       for_each_rw_ddir(ddir) {
+               if (!o->ba[ddir] || !td_random(td))
+                       o->ba[ddir] = o->min_bs[ddir];
+       }
 
        if ((o->ba[DDIR_READ] != o->min_bs[DDIR_READ] ||
            o->ba[DDIR_WRITE] != o->min_bs[DDIR_WRITE] ||
@@ -765,14 +757,12 @@ static int fixup_options(struct thread_data *td)
                log_err("fio: rate and rate_iops are mutually exclusive\n");
                ret |= 1;
        }
-       if ((o->rate[DDIR_READ] && (o->rate[DDIR_READ] < o->ratemin[DDIR_READ])) ||
-           (o->rate[DDIR_WRITE] && (o->rate[DDIR_WRITE] < o->ratemin[DDIR_WRITE])) ||
-           (o->rate[DDIR_TRIM] && (o->rate[DDIR_TRIM] < o->ratemin[DDIR_TRIM])) ||
-           (o->rate_iops[DDIR_READ] && (o->rate_iops[DDIR_READ] < o->rate_iops_min[DDIR_READ])) ||
-           (o->rate_iops[DDIR_WRITE] && (o->rate_iops[DDIR_WRITE] < o->rate_iops_min[DDIR_WRITE])) ||
-           (o->rate_iops[DDIR_TRIM] && (o->rate_iops[DDIR_TRIM] < o->rate_iops_min[DDIR_TRIM]))) {
-               log_err("fio: minimum rate exceeds rate\n");
-               ret |= 1;
+       for_each_rw_ddir(ddir) {
+               if ((o->rate[ddir] && (o->rate[ddir] < o->ratemin[ddir])) ||
+                   (o->rate_iops[ddir] && (o->rate_iops[ddir] < o->rate_iops_min[ddir]))) {
+                       log_err("fio: minimum rate exceeds rate, ddir %d\n", +ddir);
+                       ret |= 1;
+               }
        }
 
        if (!o->timeout && o->time_based) {
diff --git a/io_ddir.h b/io_ddir.h
index deaa8b5a3705e3c99e435a64b7b44da6c9b4574b..a42da97a335cd046b82c44c396695be471341b8e 100644 (file)
--- a/io_ddir.h
+++ b/io_ddir.h
@@ -16,6 +16,8 @@ enum fio_ddir {
        DDIR_RWDIR_SYNC_CNT = 4,
 };
 
+#define for_each_rw_ddir(ddir) for (enum fio_ddir ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
+
 static inline const char *io_ddir_name(enum fio_ddir ddir)
 {
        static const char *name[] = { "read", "write", "trim", "sync",
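
Relative to the stated motivation, a Zone Append direction would then be picked up by the converted call sites automatically. The following is a hypothetical sketch only: DDIR_APPEND is an invented name, it is not part of this commit, and fio's real enum (only partially visible in the hunk above) would also need its DDIR_RWDIR_CNT / DDIR_RWDIR_SYNC_CNT constants adjusted.

    /* Hypothetical, not in this commit: extending the reduced sketch from the
     * commit message section. Any data direction added before DDIR_RWDIR_CNT
     * is iterated by every existing for_each_rw_ddir() loop with no further
     * call-site changes. */
    enum fio_ddir {
            DDIR_READ = 0,
            DDIR_WRITE,
            DDIR_TRIM,
            DDIR_APPEND,        /* invented name for Zone Append */
            DDIR_RWDIR_CNT,     /* now 4: loops cover the new direction too */
    };
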
diff --git a/stat.c b/stat.c
index 23657cee6a7702c3aacd81783acb1516b3960a1b..7f987c7f41370745b0b50acb992f21782ec4b5ad 100644 (file)
--- a/stat.c
+++ b/stat.c
@@ -1078,12 +1078,10 @@ static void show_thread_status_normal(struct thread_stat *ts,
        if (strlen(ts->description))
                log_buf(out, "  Description  : [%s]\n", ts->description);
 
-       if (ts->io_bytes[DDIR_READ])
-               show_ddir_status(rs, ts, DDIR_READ, out);
-       if (ts->io_bytes[DDIR_WRITE])
-               show_ddir_status(rs, ts, DDIR_WRITE, out);
-       if (ts->io_bytes[DDIR_TRIM])
-               show_ddir_status(rs, ts, DDIR_TRIM, out);
+       for_each_rw_ddir(ddir) {
+               if (ts->io_bytes[ddir])
+                       show_ddir_status(rs, ts, ddir, out);
+       }
 
        show_latencies(ts, out);
 
@@ -2315,9 +2313,9 @@ void __show_running_run_stats(void)
 
        for_each_td(td, i) {
                td->update_rusage = 1;
-               td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
-               td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
-               td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
+               for_each_rw_ddir(ddir) {
+                       td->ts.io_bytes[ddir] = td->io_bytes[ddir];
+               }
                td->ts.total_run_time = mtime_since(&td->epoch, &ts);
 
                rt[i] = mtime_since(&td->start, &ts);