Improve mixed random append option
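In a mixed read/write workload, switch data direction based on bytes issued as well as elapsed time (rwmixcycle): buffered writes can be issued far faster than they complete, so a purely time-based switch skews the mix. The first cycle is shortened, a per-direction byte cap keeps one direction from overrunning the range too much, and when the direction changes the byte threshold for the next switch is recomputed in set_rwmix_bytes(). For jobs that do a number of blocks per random offset (ddir_nr), get_next_offset() continues from the last position until the count runs out, and mark_random_map() no longer bails out on blocks that already saw IO, since such bursts can legitimately revisit them.

As a rough illustration of the threshold arithmetic in set_rwmix_bytes() below, here is a standalone sketch (not fio code; the variable names and the example byte counts are invented for the example):

#include <stdio.h>

int main(void)
{
	/* bytes completed in the direction we are leaving (example value) */
	unsigned long long io_bytes_old = 75ULL << 20;
	/* the previous switch threshold, i.e. rwmix_bytes (example value) */
	unsigned long long prev_threshold = 50ULL << 20;
	/* rwmix percentage of the direction we are switching to */
	unsigned int diff = 25;

	/* overshoot past the old threshold */
	unsigned long long rbytes = io_bytes_old - prev_threshold;
	/* scale the overshoot by (100 - diff) / diff to get the next threshold */
	unsigned long long next = io_bytes_old + rbytes * (100 - diff) / diff;

	printf("overshoot %lluMB, next switch threshold %lluMB\n",
	       rbytes >> 20, next >> 20);
	return 0;
}

With these numbers the overshoot is 25MB and the next switch threshold comes out at 150MB.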
diff --git a/io_u.c b/io_u.c
index 647751385e35c613ab821a5a2fe7d61aa7a924cd..a171ee4be8998cfd07612ea79bc6d854dbfcc3f3 100644
--- a/io_u.c
+++ b/io_u.c
@@ -52,7 +52,11 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
        while (blocks < nr_blocks) {
                unsigned int idx, bit;
 
-               if (!random_map_free(td, f, block))
+               /*
+                * If we have a mixed random workload, we may
+                * encounter blocks we already did IO to.
+                */
+               if (!td->o.ddir_nr && !random_map_free(td, f, block))
                        break;
 
                idx = RAND_MAP_IDX(td, f, block);
@@ -109,6 +113,15 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u)
                unsigned long long max_blocks = f->file_size / td->o.min_bs[ddir];
                int loops = 5;
 
+               if (td->o.ddir_nr) {
+                       if (!--td->ddir_nr)
+                               td->ddir_nr = td->o.ddir_nr;
+                       else {
+                               b = f->last_pos / td->o.min_bs[ddir];
+                               goto out;
+                       }
+               }
+
                do {
                        r = os_random_long(&td->random_state);
                        if (!max_blocks)
@@ -130,6 +143,7 @@ static int get_next_offset(struct thread_data *td, struct io_u *io_u)
        } else
                b = f->last_pos / td->o.min_bs[ddir];
 
+out:
        io_u->offset = (b * td->o.min_bs[ddir]) + f->file_offset;
        if (io_u->offset >= f->real_file_size)
                return 1;
@@ -169,6 +183,35 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
        return buflen;
 }
 
+static void set_rwmix_bytes(struct thread_data *td)
+{
+       unsigned long long rbytes;
+       unsigned int diff;
+
+       /*
+        * We switch based on time or bytes issued. This is needed
+        * because buffered writes may be issued a lot quicker than
+        * they complete, whereas reads are not.
+        */
+       rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
+       diff = td->o.rwmix[td->rwmix_ddir ^ 1];
+
+       td->rwmix_bytes = td->io_bytes[td->rwmix_ddir] + (rbytes * ((100 - diff)) / diff);
+}
+
+static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
+{
+       unsigned int v;
+       long r;
+
+       r = os_random_long(&td->rwmix_state);
+       v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
+       if (v < td->o.rwmix[DDIR_READ])
+               return DDIR_READ;
+
+       return DDIR_WRITE;
+}
+
 /*
  * Return the data direction for the next io_u. If the job is a
  * mixed read/write workload, check the rwmix cycle and switch if
@@ -179,23 +222,45 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
        if (td_rw(td)) {
                struct timeval now;
                unsigned long elapsed;
+               unsigned int cycle;
 
                fio_gettime(&now, NULL);
                elapsed = mtime_since_now(&td->rwmix_switch);
 
+               /*
+                * if this is the first cycle, make it shorter
+                */
+               cycle = td->o.rwmixcycle;
+               if (!td->rwmix_bytes)
+                       cycle /= 10;
+
                /*
                 * Check if it's time to seed a new data direction.
                 */
-               if (elapsed >= td->o.rwmixcycle) {
-                       unsigned int v;
-                       long r;
-
-                       r = os_random_long(&td->rwmix_state);
-                       v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
-                       if (v < td->o.rwmixread)
-                               td->rwmix_ddir = DDIR_READ;
-                       else
-                               td->rwmix_ddir = DDIR_WRITE;
+               if (elapsed >= cycle ||
+                   td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
+                       unsigned long long max_bytes;
+                       enum fio_ddir ddir;
+
+                       /*
+                        * Put a top limit on how many bytes we do for
+                        * one data direction, to avoid overflowing the
+                        * ranges too much
+                        */
+                       ddir = get_rand_ddir(td);
+                       max_bytes = td->this_io_bytes[ddir];
+                       if (max_bytes >= (td->io_size * td->o.rwmix[ddir] / 100)) {
+                               if (!td->rw_end_set[ddir]) {
+                                       td->rw_end_set[ddir] = 1;
+                                       memcpy(&td->rw_end[ddir], &now, sizeof(now));
+                               }
+                               ddir ^= 1;
+                       }
+
+                       if (ddir != td->rwmix_ddir)
+                               set_rwmix_bytes(td);
+
+                       td->rwmix_ddir = ddir;
                        memcpy(&td->rwmix_switch, &now, sizeof(now));
                }
                return td->rwmix_ddir;
@@ -660,12 +725,10 @@ long io_u_queued_complete(struct thread_data *td, int min_events)
        struct io_completion_data icd;
        struct timespec *tvp = NULL;
        int ret;
+       struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
 
-       if (!min_events) {
-               struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
-
+       if (!min_events)
                tvp = &ts;
-       }
 
        ret = td_io_getevents(td, min_events, td->cur_depth, tvp);
        if (ret < 0) {
@@ -754,7 +817,7 @@ static void io_u_timeout_handler(int fio_unused sig)
                return;
        }
 
-       log_err("fio: io_u timeout: job=%s, pid=%d\n", td->name, td->pid);
+       log_err("fio: io_u timeout: job=%s, pid=%d\n", td->o.name, td->pid);
 
        list_for_each(entry, &td->io_u_busylist) {
                io_u = list_entry(entry, struct io_u, list);