Improve rwmix states
author Jens Axboe <jens.axboe@oracle.com>
Wed, 21 Mar 2007 13:05:39 +0000 (14:05 +0100)
committer Jens Axboe <jens.axboe@oracle.com>
Wed, 21 Mar 2007 13:05:39 +0000 (14:05 +0100)
We didn't use to look at the previous bytes done for the
data directions, which skewed the mix for buffered IO.
Fix that up.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
fio.h
init.c
io_u.c
options.c

diff --git a/fio.h b/fio.h
index ab8f1d9a28e13162d4b4b0c2e32d465e0d582fe7..1fb01d9522f5f2d4bd20babd81cc227bb87238e4 100644 (file)
--- a/fio.h
+++ b/fio.h
@@ -381,8 +381,7 @@ struct thread_options {
        unsigned int iolog;
        unsigned int read_iolog;
        unsigned int rwmixcycle;
-       unsigned int rwmixread;
-       unsigned int rwmixwrite;
+       unsigned int rwmix[2];
        unsigned int nice;
        unsigned int file_service_type;
        unsigned int group_reporting;
diff --git a/init.c b/init.c
index b4b4abe2a8d3119edd30de60b7ab15c57ffe2a7c..911ee32f995d4c6fd6562a2cde3194eaa1beca8c 100644 (file)
--- a/init.c
+++ b/init.c
@@ -171,8 +171,8 @@ static int fixup_options(struct thread_data *td)
 {
        struct thread_options *o = &td->o;
 
-       if (!o->rwmixread && o->rwmixwrite)
-               o->rwmixread = 100 - o->rwmixwrite;
+       if (o->rwmix[DDIR_READ] + o->rwmix[DDIR_WRITE] > 100)
+               o->rwmix[DDIR_WRITE] = 100 - o->rwmix[DDIR_READ];
 
        if (o->write_iolog_file && o->read_iolog_file) {
                log_err("fio: read iolog overrides write_iolog\n");
diff --git a/io_u.c b/io_u.c
index 9cb68af4691025ff83f50c9d562292d94bfefccc..1f9ebbc83bd61da39947274130aef529bc5708e0 100644 (file)
--- a/io_u.c
+++ b/io_u.c
@@ -180,11 +180,22 @@ static void set_rwmix_bytes(struct thread_data *td)
         * whereas reads do not.
         */
        rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
-       diff = td->o.rwmixread;
-       if (td->rwmix_ddir == DDIR_WRITE)
-               diff = 100 - diff;
+       diff = td->o.rwmix[td->rwmix_ddir ^ 1];
 
-       td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * (100 - diff)) / diff;
+       td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * ((100 - diff)) / diff);
+}
+
+static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
+{
+       unsigned int v;
+       long r;
+
+       r = os_random_long(&td->rwmix_state);
+       v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
+       if (v < td->o.rwmix[DDIR_READ])
+               return DDIR_READ;
+
+       return DDIR_WRITE;
 }
 
 /*
@@ -202,6 +213,9 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
                fio_gettime(&now, NULL);
                elapsed = mtime_since_now(&td->rwmix_switch);
 
+               /*
+                * if this is the first cycle, make it shorter
+                */
                cycle = td->o.rwmixcycle;
                if (!td->rwmix_bytes)
                        cycle /= 10;
@@ -209,22 +223,25 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
                /*
                 * Check if it's time to seed a new data direction.
                 */
-               if (elapsed >= cycle &&
+               if (elapsed >= cycle ||
                    td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
-                       unsigned int v;
-                       long r;
-
-                       r = os_random_long(&td->rwmix_state);
-                       v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
-                       if (v < td->o.rwmixread) {
-                               if (td->rwmix_ddir != DDIR_READ)
-                                       set_rwmix_bytes(td);
-                               td->rwmix_ddir = DDIR_READ;
-                       } else {
-                               if (td->rwmix_ddir != DDIR_WRITE)
-                                       set_rwmix_bytes(td);
-                               td->rwmix_ddir = DDIR_WRITE;
-                       }
+                       unsigned long long max_bytes;
+                       enum fio_ddir ddir;
+
+                       /*
+                        * Put a top limit on how many bytes we do for
+                        * one data direction, to avoid overflowing the
+                        * ranges too much
+                        */
+                       ddir = get_rand_ddir(td);
+                       max_bytes = td->this_io_bytes[ddir];
+                       if (max_bytes >= (td->io_size * td->o.rwmix[ddir] / 100))
+                               ddir ^= 1;
+
+                       if (ddir != td->rwmix_ddir)
+                               set_rwmix_bytes(td);
+
+                       td->rwmix_ddir = ddir;
                        memcpy(&td->rwmix_switch, &now, sizeof(now));
                }
                return td->rwmix_ddir;
diff --git a/options.c b/options.c
index 0642a625e2113ae58c0fd9e301d0251d779c0dc5..e13b2b241342092b0d60b5c7b499fed4e85a0582 100644 (file)
--- a/options.c
+++ b/options.c
@@ -576,7 +576,7 @@ static struct fio_option options[] = {
        {
                .name   = "rwmixread",
                .type   = FIO_OPT_INT,
-               .off1   = td_var_offset(rwmixread),
+               .off1   = td_var_offset(rwmix[DDIR_READ]),
                .maxval = 100,
                .help   = "Percentage of mixed workload that is reads",
                .def    = "50",
@@ -584,7 +584,7 @@ static struct fio_option options[] = {
        {
                .name   = "rwmixwrite",
                .type   = FIO_OPT_INT,
-               .off1   = td_var_offset(rwmixwrite),
+               .off1   = td_var_offset(rwmix[DDIR_WRITE]),
                .maxval = 100,
                .help   = "Percentage of mixed workload that is writes",
                .def    = "50",