Improve iodepth logging
diff --git a/io_u.c b/io_u.c
index 1e60eee5ccee03f20126a825ff9ad4989661c7cf..7f52a243fbdbc8e2b12706c1f45c52716729a8ab 100644
--- a/io_u.c
+++ b/io_u.c
@@ -24,11 +24,10 @@ struct io_completion_data {
  * The ->file_map[] contains a map of blocks we have or have not done io
  * to yet. Used to make sure we cover the entire range in a fair fashion.
  */
-static int random_map_free(struct thread_data *td, struct fio_file *f,
-                          const unsigned long long block)
+static int random_map_free(struct fio_file *f, const unsigned long long block)
 {
-       unsigned int idx = RAND_MAP_IDX(td, f, block);
-       unsigned int bit = RAND_MAP_BIT(td, f, block);
+       unsigned int idx = RAND_MAP_IDX(f, block);
+       unsigned int bit = RAND_MAP_BIT(f, block);
 
        dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit);
 
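With the map geometry derivable from the file alone, the td argument can be
dropped. A minimal sketch of what the reworked RAND_MAP_IDX()/RAND_MAP_BIT()
helpers plausibly compute, assuming the usual one-bit-per-block map in
f->file_map[] (the real macros live in a header and may differ):

	/* blocks tracked by one unsigned long map word */
	#define BLOCKS_PER_MAP	(8 * sizeof(unsigned long))

	/* word in f->file_map[] that covers this block */
	#define RAND_MAP_IDX(f, b)	((b) / BLOCKS_PER_MAP)

	/* bit for the block within that word (power-of-2 modulo) */
	#define RAND_MAP_BIT(f, b)	((b) & (BLOCKS_PER_MAP - 1))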
@@ -57,11 +56,11 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
                 * If we have a mixed random workload, we may
                 * encounter blocks we already did IO to.
                 */
-               if ((td->o.ddir_nr == 1) && !random_map_free(td, f, block))
+               if ((td->o.ddir_nr == 1) && !random_map_free(f, block))
                        break;
 
-               idx = RAND_MAP_IDX(td, f, block);
-               bit = RAND_MAP_BIT(td, f, block);
+               idx = RAND_MAP_IDX(f, block);
+               bit = RAND_MAP_BIT(f, block);
 
                fio_assert(td, idx < f->num_maps);
 
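Given idx and bit, marking and testing a block are single bit operations. A
hedged sketch of the marking step in mark_random_map() and of the test that
random_map_free() performs, neither of which is shown in these hunks:

	f->file_map[idx] |= (1UL << bit);		/* mark block as done */

	return !(f->file_map[idx] & (1UL << bit));	/* free if bit unset */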
@@ -84,7 +83,7 @@ static inline unsigned long long last_block(struct thread_data *td,
        if (!max_blocks)
                return 0;
 
-       return max_blocks - 1;
+       return max_blocks;
 }
 
 /*
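The return value shifts from the last valid block index to the block count:
with 1 MiB of IO space and a 4 KiB minimum block size, max_blocks is 256, so
the old code returned 255 where the new code returns 256. Presumably the
callers (outside this hunk) now treat the result as an exclusive upper bound,
generating blocks in [0, last_block) rather than [0, last_block].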
@@ -130,13 +129,13 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
                /*
                 * if we are not maintaining a random map, we are done.
                 */
-               if (td->o.norandommap)
+               if (!file_randommap(td, f))
                        return 0;
 
                /*
                 * calculate map offset and check if it's free
                 */
-               if (random_map_free(td, f, *b))
+               if (random_map_free(f, *b))
                        return 0;
 
                dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
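Both random-map checks now route through file_randommap() instead of testing
td->o.norandommap directly, so the decision becomes per-file. A plausible
shape for the helper, offered as an assumption rather than the shipped code:

	static inline int file_randommap(struct thread_data *td,
					 struct fio_file *f)
	{
		/* map may be absent: disabled globally, or never allocated */
		if (!f->file_map || td->o.norandommap)
			return 0;
		return 1;
	}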
@@ -243,7 +242,7 @@ static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
 
 static void set_rwmix_bytes(struct thread_data *td)
 {
-       unsigned long long rbytes;
+       unsigned long issues;
        unsigned int diff;
 
        /*
@@ -251,11 +250,11 @@ static void set_rwmix_bytes(struct thread_data *td)
         * buffered writes may issue a lot quicker than they complete,
         * whereas reads do not.
         */
-       rbytes = td->io_bytes[td->rwmix_ddir] - td->rwmix_bytes;
+       issues = td->io_issues[td->rwmix_ddir] - td->rwmix_issues;
        diff = td->o.rwmix[td->rwmix_ddir ^ 1];
 
-       td->rwmix_bytes = td->io_bytes[td->rwmix_ddir]
-                               + (rbytes * ((100 - diff)) / diff);
+       td->rwmix_issues = td->io_issues[td->rwmix_ddir]
+                               + (issues * ((100 - diff)) / diff);
 }
 
 static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
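The threshold is now tracked in issued IOs rather than completed bytes, which
sidesteps the skew the comment describes: buffered writes are issued much
faster than they complete. Worked through: with rwmix_ddir = DDIR_READ and
rwmix[DDIR_WRITE] = 20, diff is 20, so overshooting the previous threshold by
issues = 10 reads advances rwmix_issues to
io_issues[DDIR_READ] + 10 * (100 - 20) / 20 = io_issues[DDIR_READ] + 40.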
@@ -279,25 +278,10 @@ static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
 static enum fio_ddir get_rw_ddir(struct thread_data *td)
 {
        if (td_rw(td)) {
-               struct timeval now;
-               unsigned long elapsed;
-               unsigned int cycle;
-
-               fio_gettime(&now, NULL);
-               elapsed = mtime_since_now(&td->rwmix_switch);
-
-               /*
-                * if this is the first cycle, make it shorter
-                */
-               cycle = td->o.rwmixcycle;
-               if (!td->rwmix_bytes)
-                       cycle /= 10;
-
                /*
                 * Check if it's time to seed a new data direction.
                 */
-               if (elapsed >= cycle ||
-                   td->io_bytes[td->rwmix_ddir] >= td->rwmix_bytes) {
+               if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
                        unsigned long long max_bytes;
                        enum fio_ddir ddir;
 
@@ -310,11 +294,9 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
                        max_bytes = td->this_io_bytes[ddir];
                        if (max_bytes >=
                            (td->o.size * td->o.rwmix[ddir] / 100)) {
-                               if (!td->rw_end_set[ddir]) {
+                               if (!td->rw_end_set[ddir])
                                        td->rw_end_set[ddir] = 1;
-                                       memcpy(&td->rw_end[ddir], &now,
-                                               sizeof(now));
-                               }
+
                                ddir ^= 1;
                        }
 
@@ -322,7 +304,6 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
                                set_rwmix_bytes(td);
 
                        td->rwmix_ddir = ddir;
-                       memcpy(&td->rwmix_switch, &now, sizeof(now));
                }
                return td->rwmix_ddir;
        } else if (td_read(td))
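Dropping the elapsed-time cycle also removes a fio_gettime() call from every
IO of a mixed workload; picking a direction is now a plain counter compare.
A rough before/after contrast, assuming get_rw_ddir() runs once per io_u:

	/* before: one gettimeofday-class call per io_u */
	fio_gettime(&now, NULL);
	elapsed = mtime_since_now(&td->rwmix_switch);

	/* after: two loads and a compare */
	if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues)
		switch_direction();	/* hypothetical helper */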
@@ -416,7 +397,7 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
        /*
         * mark entry before potentially trimming io_u
         */
-       if (td_random(td) && !td->o.norandommap)
+       if (td_random(td) && file_randommap(td, io_u->file))
                mark_random_map(td, io_u);
 
        /*
@@ -429,13 +410,10 @@ out:
        return 0;
 }
 
-void io_u_mark_depth(struct thread_data *td, struct io_u *io_u)
+void io_u_mark_depth(struct thread_data *td, unsigned int nr)
 {
        int index = 0;
 
-       if (io_u->ddir == DDIR_SYNC)
-               return;
-
        switch (td->cur_depth) {
        default:
                index = 6;
@@ -458,8 +436,7 @@ void io_u_mark_depth(struct thread_data *td, struct io_u *io_u)
                break;
        }
 
-       td->ts.io_u_map[index]++;
-       td->ts.total_io_u[io_u->ddir]++;
+       td->ts.io_u_map[index] += nr;
 }
 
 static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
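This last pair of hunks is the change the subject line refers to:
io_u_mark_depth() now takes a count, so the depth histogram can be bumped
once per batch of events instead of once per io_u, and the DDIR_SYNC filter
plus the per-direction total_io_u accounting move out to callers that know
the direction. A hypothetical call site, assuming the reap path passes the
number of events it just collected:

	/* after reaping 'events' completions in one call */
	io_u_mark_depth(td, events);

	/* per-direction totals would then be kept where ddir is known */
	if (io_u->ddir != DDIR_SYNC)
		td->ts.total_io_u[io_u->ddir]++;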