Add support for absolute random zones
[fio.git] / io_u.c
diff --git a/io_u.c b/io_u.c
index fb4180a3bc35f16cf6a0463b01be1ff9b9e7f347..6ec04fa30607ba755835bf96034bff1edcf34bd5 100644 (file)
--- a/io_u.c
+++ b/io_u.c
@@ -157,6 +157,80 @@ static int __get_next_rand_offset_gauss(struct thread_data *td,
        return 0;
 }
 
+/*
+ * Generate the next random block offset for 'ddir' using the absolute
+ * zoned distribution (random_distribution=zoned_abs). A random value in
+ * 1..100 selects an entry from td->zone_state_index[]; the block index
+ * stored in *b then falls within that zone's byte range, scaled to
+ * block units via the per-direction block alignment (td->o.ba[ddir]).
+ * Returns 0 on success, 1 on failure (empty file, or zone sizes that
+ * exceed the file size).
+ */
+static int __get_next_rand_offset_zoned_abs(struct thread_data *td,
+                                           struct fio_file *f,
+                                           enum fio_ddir ddir, uint64_t *b)
+{
+       struct zone_split_index *zsi;
+       uint64_t offset, lastb;
+       uint64_t send, stotal;
+       /*
+        * NOTE(review): shared across all threads with no synchronization.
+        * The race is benign - worst case is a duplicated warning.
+        */
+       static int warned;
+       unsigned int v;
+
+       lastb = last_block(td, f, ddir);
+       if (!lastb)
+               return 1;
+
+       /*
+        * No zones configured for this data direction - fall back to a
+        * plain uniform random offset over the whole file.
+        */
+       if (!td->o.zone_split_nr[ddir]) {
+bail:
+               return __get_next_rand_offset(td, f, ddir, b, lastb);
+       }
+
+       /*
+        * Generate a value, v, between 1 and 100, both inclusive
+        */
+       v = rand32_between(&td->zone_state, 1, 100);
+
+       /*
+        * Convert the selected zone's byte boundaries to block units:
+        * 'stotal' is the zone start, 'send' the zone end (exclusive).
+        * NOTE(review): assumes zsi->size_prev/size hold cumulative byte
+        * offsets prepared at option-parse time - confirm in options.c.
+        */
+       zsi = &td->zone_state_index[ddir][v - 1];
+       stotal = zsi->size_prev / td->o.ba[ddir];
+       send = zsi->size / td->o.ba[ddir];
+
+       /*
+        * Should never happen: -1U is the "unset" sentinel for a zone
+        * entry. NOTE(review): 'send' is 64-bit, so this compares against
+        * the promoted 32-bit sentinel (0xffffffff) - confirm the setup
+        * code stores -1U (not -1ULL) for unset entries.
+        */
+       if (send == -1U) {
+               if (!warned) {
+                       log_err("fio: bug in zoned generation\n");
+                       warned = 1;
+               }
+               goto bail;
+       } else if (send > lastb) {
+               /*
+                * This happens if the user specifies ranges that exceed
+                * the file/device size. We can't handle that gracefully,
+                * so error and exit.
+                */
+               log_err("fio: zoned_abs sizes exceed file size\n");
+               return 1;
+       }
+
+       /*
+        * 'send' marks the end block of the selected zone, 'stotal' the
+        * start block. Remember the start so the uniformly generated
+        * zone-relative index can be rebased below.
+        */
+       if (stotal)
+               offset = stotal;
+       else
+               offset = 0;
+
+       /* Restrict the random range to the zone's length in blocks. */
+       lastb = send - stotal;
+
+       /*
+        * Generate a zone-relative index in 0..lastb-1
+        */
+       if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
+               return 1;
+
+       /*
+        * Add our start offset, if any, to rebase the index into the
+        * selected zone's absolute block range.
+        */
+       if (offset)
+               *b += offset;
+
+       return 0;
+}
+
 static int __get_next_rand_offset_zoned(struct thread_data *td,
                                        struct fio_file *f, enum fio_ddir ddir,
                                        uint64_t *b)
@@ -249,6 +323,8 @@ static int get_off_from_method(struct thread_data *td, struct fio_file *f,
                return __get_next_rand_offset_gauss(td, f, ddir, b);
        else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
                return __get_next_rand_offset_zoned(td, f, ddir, b);
+       else if (td->o.random_distribution == FIO_RAND_DIST_ZONED_ABS)
+               return __get_next_rand_offset_zoned_abs(td, f, ddir, b);
 
        log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
        return 1;
@@ -323,6 +399,17 @@ fetch:
        goto fetch;
 }
 
+/*
+ * Invalidate the page cache for 'f' when a time based job wraps back to
+ * the start of the file. Only done for buffered IO (invalidate_cache is
+ * set and O_DIRECT is not in use), so a looping read job does not just
+ * re-read data cached on the previous pass.
+ */
+static void loop_cache_invalidate(struct thread_data *td, struct fio_file *f)
+{
+       struct thread_options *o = &td->o;
+
+       if (o->invalidate_cache && !o->odirect) {
+               int fio_unused ret;
+
+               /* Best effort - a failed invalidation is ignored. */
+               ret = file_invalidate_cache(td, f);
+       }
+}
+
 static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, uint64_t *b)
 {
@@ -334,6 +421,7 @@ static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
                fio_file_reset(td, f);
                if (!get_next_rand_offset(td, f, ddir, b))
                        return 0;
+               loop_cache_invalidate(td, f);
        }
 
        dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
@@ -349,15 +437,14 @@ static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
 
        assert(ddir_rw(ddir));
 
+       /*
+        * If we reach the end for a time based run, reset us back to 0
+        * and invalidate the cache, if we need to.
+        */
        if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
            o->time_based) {
-               struct thread_options *o = &td->o;
-               uint64_t io_size = f->io_size + (f->io_size % o->min_bs[ddir]);
-
-               if (io_size > f->last_pos[ddir])
-                       f->last_pos[ddir] = 0;
-               else
-                       f->last_pos[ddir] = f->last_pos[ddir] - io_size;
+               f->last_pos[ddir] = f->file_offset;
+               loop_cache_invalidate(td, f);
        }
 
        if (f->last_pos[ddir] < f->real_file_size) {