Improve rwmix buffered split

Buffered writes may issue a lot quicker than they complete, whereas
reads do not, so a purely time based rwmix switch can skew the
achieved read/write split. Set a byte target alongside the time
cycle and only switch direction once both have been met. Also use
td->o.name in the io_u timeout error message.
diff --git a/io_u.c b/io_u.c
index 647751385e35c613ab821a5a2fe7d61aa7a924cd..9cb68af4691025ff83f50c9d562292d94bfefccc 100644
--- a/io_u.c
+++ b/io_u.c
@@ -169,6 +169,24 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
        return buflen;
 }
 
+static void set_rwmix_bytes(struct thread_data *td)
+{
+       unsigned long long rbytes;
+       unsigned int diff;
+
+       /*
+        * we do a time- or byte-based switch. this is needed because
+        * buffered writes may issue a lot quicker than they complete,
+        * whereas reads do not.
+        */
+       rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
+       diff = td->o.rwmixread;
+       if (td->rwmix_ddir == DDIR_WRITE)
+               diff = 100 - diff;
+
+       td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * (100 - diff)) / diff;
+}
+
 /*
  * Return the data direction for the next io_u. If the job is a
  * mixed read/write workload, check the rwmix cycle and switch if
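The byte target set by set_rwmix_bytes() above governs how many bytes the
next direction may do before another switch is considered. Below is a
minimal standalone sketch of that arithmetic with hypothetical sample
values (a 75/25 read mix switching from writes to reads); it mirrors the
patch's formula but is not part of the patch:

/*
 * Standalone sketch of the set_rwmix_bytes() arithmetic. The sample
 * values are hypothetical, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rwmixread = 75;	/* desired split: 75% reads */
	/* bytes done in the write direction, and the previous target */
	unsigned long long io_bytes = 200ULL << 20;
	unsigned long long rwmix_bytes = 160ULL << 20;
	unsigned long long rbytes;
	unsigned int diff;

	/* same computation as the patch for a write -> read switch */
	rbytes = io_bytes - rwmix_bytes;	/* 40 MiB written this phase */
	diff = 100 - rwmixread;			/* write share: 25 */
	rwmix_bytes = io_bytes + (rbytes * (100 - diff)) / diff;

	/* 40 MiB of writes extends the target by 120 MiB, a 3:1 ratio */
	printf("byte target extended by %llu MiB\n",
	       (rwmix_bytes - io_bytes) >> 20);
	return 0;
}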
@@ -179,23 +197,34 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
        if (td_rw(td)) {
                struct timeval now;
                unsigned long elapsed;
+               unsigned int cycle;
 
                fio_gettime(&now, NULL);
                elapsed = mtime_since_now(&td->rwmix_switch);
 
+               cycle = td->o.rwmixcycle;
+               if (!td->rwmix_bytes)
+                       cycle /= 10;
+
                /*
                 * Check if it's time to seed a new data direction.
                 */
-               if (elapsed >= td->o.rwmixcycle) {
+               if (elapsed >= cycle &&
+                   td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
                        unsigned int v;
                        long r;
 
                        r = os_random_long(&td->rwmix_state);
                        v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
-                       if (v < td->o.rwmixread)
+                       if (v < td->o.rwmixread) {
+                               if (td->rwmix_ddir != DDIR_READ)
+                                       set_rwmix_bytes(td);
                                td->rwmix_ddir = DDIR_READ;
-                       else
+                       } else {
+                               if (td->rwmix_ddir != DDIR_WRITE)
+                                       set_rwmix_bytes(td);
                                td->rwmix_ddir = DDIR_WRITE;
+                       }
                        memcpy(&td->rwmix_switch, &now, sizeof(now));
                }
                return td->rwmix_ddir;
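The gate above now requires both the (possibly shortened) time cycle and
the byte target to be met; the direction itself is still picked at random
in proportion to rwmixread. A small self-contained sketch of that 1..100
scaling, with libc rand() standing in for fio's os_random_long() and
td->rwmix_state, and the DDIR enum redefined locally:

/*
 * Sketch of the direction pick, with rand() standing in for fio's
 * os_random_long()/td->rwmix_state.
 */
#include <stdio.h>
#include <stdlib.h>

enum fio_ddir { DDIR_READ, DDIR_WRITE };

static enum fio_ddir pick_ddir(unsigned int rwmixread)
{
	long r = rand();
	/* scale r into 1..100, exactly as the hunk above does */
	unsigned int v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));

	return v < rwmixread ? DDIR_READ : DDIR_WRITE;
}

int main(void)
{
	unsigned int reads = 0, i;

	srand(1);
	for (i = 0; i < 100000; i++)
		if (pick_ddir(75) == DDIR_READ)
			reads++;

	/* expect roughly 75% of the picks to be reads */
	printf("%u reads out of 100000 picks\n", reads);
	return 0;
}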
@@ -754,7 +783,7 @@ static void io_u_timeout_handler(int fio_unused sig)
                return;
        }
 
-       log_err("fio: io_u timeout: job=%s, pid=%d\n", td->name, td->pid);
+       log_err("fio: io_u timeout: job=%s, pid=%d\n", td->o.name, td->pid);
 
        list_for_each(entry, &td->io_u_busylist) {
                io_u = list_entry(entry, struct io_u, list);