Base the rwmix switch algorithm on io issues
author Jens Axboe <jens.axboe@oracle.com>
Mon, 7 Apr 2008 07:19:46 +0000 (09:19 +0200)
committer Jens Axboe <jens.axboe@oracle.com>
Mon, 7 Apr 2008 07:19:46 +0000 (09:19 +0200)
The time-based switch can be unfair at times, so just simplify it a bit
and base the switch on issue counts only.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
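
For context, this switching path is only taken for mixed read/write jobs, e.g. a minimal job file along these lines (an assumed example; rw, rwmixread, bs and size are standard fio options). With issue counts driving the switch, the rwmixcycle interval no longer drives when the data direction changes:

[rwmix-test]
rw=randrw
rwmixread=75
bs=4k
size=1g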
fio.h
io_u.c

diff --git a/fio.h b/fio.h
index 661dd49b2519f11e748451e98e3af5c2b48f6a08..036e62afe115316fed70751d5f2e3374da84485c 100644
--- a/fio.h
+++ b/fio.h
@@ -598,8 +598,7 @@ struct thread_data {
         * read/write mixed workload state
         */
        os_random_state_t rwmix_state;
-       unsigned long long rwmix_bytes;
-       struct timeval rwmix_switch;
+       unsigned long rwmix_issues;
        enum fio_ddir rwmix_ddir;
        unsigned int ddir_nr;
 
diff --git a/io_u.c b/io_u.c
index 7a23d2cf7fdd4a9127ec09298b8548cbca60588c..46d9e0b7f86a8d21f592cb473edba6eb126d99d9 100644
--- a/io_u.c
+++ b/io_u.c
@@ -243,7 +243,7 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
 
 static void set_rwmix_bytes(struct thread_data *td)
 {
-       unsigned long long rbytes;
+       unsigned long issues;
        unsigned int diff;
 
        /*
@@ -251,11 +251,11 @@ static void set_rwmix_bytes(struct thread_data *td)
         * buffered writes may issue a lot quicker than they complete,
         * whereas reads do not.
         */
-       rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
+       issues = td->io_issues[td->rwmix_ddir] - td->rwmix_issues;
        diff = td->o.rwmix[td->rwmix_ddir ^ 1];
 
-       td->rwmix_bytes = td->io_bytes[td->rwmix_ddir]
-                               + (rbytes * ((100 - diff)) / diff);
+       td->rwmix_issues = td->io_issues[td->rwmix_ddir]
+                               + (issues * ((100 - diff)) / diff);
 }
 
 static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
@@ -279,25 +279,10 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 static enum fio_ddir get_rw_ddir(struct thread_data *td)
 {
        if (td_rw(td)) {
-               struct timeval now;
-               unsigned long elapsed;
-               unsigned int cycle;
-
-               fio_gettime(&now, NULL);
-               elapsed = mtime_since_now(&td->rwmix_switch);
-
-               /*
-                * if this is the first cycle, make it shorter
-                */
-               cycle = td->o.rwmixcycle;
-               if (!td->rwmix_bytes)
-                       cycle /= 10;
-
                /*
                 * Check if it's time to seed a new data direction.
                 */
-               if (elapsed >= cycle ||
-                   td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
+               if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
                        unsigned long long max_bytes;
                        enum fio_ddir ddir;
 
@@ -310,11 +295,9 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
                        max_bytes = td->this_io_bytes[ddir];
                        if (max_bytes >=
                            (td->o.size * td->o.rwmix[ddir] / 100)) {
-                               if (!td->rw_end_set[ddir]) {
+                               if (!td->rw_end_set[ddir])
                                        td->rw_end_set[ddir] = 1;
-                                       memcpy(&td->rw_end[ddir], &now,
-                                               sizeof(now));
-                               }
+
                                ddir ^= 1;
                        }
 
@@ -322,7 +305,6 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
                                set_rwmix_bytes(td);
 
                        td->rwmix_ddir = ddir;
-                       memcpy(&td->rwmix_switch, &now, sizeof(now));
                }
                return td->rwmix_ddir;
        } else if (td_read(td))
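
As a rough illustration of why issue counts make a fairer trigger than wall-clock time, below is a small, self-contained sketch (hypothetical names, not fio's actual state machine, which reseeds a random direction via get_rand_ddir() and rescales a threshold in set_rwmix_bytes() as shown above). It simply issues to whichever direction has fallen furthest behind its target share of issued I/Os:

#include <stdio.h>

enum ddir { DDIR_READ = 0, DDIR_WRITE = 1 };

/*
 * Pick the direction whose share of issued I/Os lags its target
 * percentage the most. Completion latency never enters the decision,
 * so slowly completing buffered writes cannot skew the mix.
 */
static enum ddir pick_ddir(const unsigned long issued[2],
			   const unsigned int mix_pct[2])
{
	unsigned long total = issued[DDIR_READ] + issued[DDIR_WRITE];

	if (!total)
		return DDIR_READ;

	/*
	 * Compare issued[READ]/total against mix_pct[READ]/100 by
	 * cross-multiplying, to stay in integer math.
	 */
	if (issued[DDIR_READ] * 100 < mix_pct[DDIR_READ] * total)
		return DDIR_READ;

	return DDIR_WRITE;
}

int main(void)
{
	unsigned int mix_pct[2] = { 75, 25 };	/* 75% reads, 25% writes */
	unsigned long issued[2] = { 0, 0 };
	int i;

	for (i = 0; i < 1000; i++)
		issued[pick_ddir(issued, mix_pct)]++;

	printf("reads=%lu writes=%lu\n", issued[DDIR_READ],
		issued[DDIR_WRITE]);
	return 0;
}

With the 75/25 split above this settles on roughly three reads issued per write, regardless of how long each I/O takes to complete, which is the property the elapsed-time cycle could not guarantee when buffered writes are issued much faster than they finish.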