Merge branch 'expression-parser'
[fio.git] / io_u.c
diff --git a/io_u.c b/io_u.c
index 7cbdb915a19629fcbd3b500b9255c918bfbd6dd1..612057d15762dfa13e7e48bc48e99658f9e8880a 100644
--- a/io_u.c
+++ b/io_u.c
@@ -271,20 +271,32 @@ static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
 static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, uint64_t *offset)
 {
+       struct thread_options *o = &td->o;
+
        assert(ddir_rw(ddir));
 
-       if (f->last_pos >= f->io_size + get_start_offset(td, f) && td->o.time_based)
+       if (f->last_pos >= f->io_size + get_start_offset(td, f) &&
+           o->time_based)
                f->last_pos = f->last_pos - f->io_size;
 
        if (f->last_pos < f->real_file_size) {
                uint64_t pos;
 
-               if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
+               if (f->last_pos == f->file_offset && o->ddir_seq_add < 0)
                        f->last_pos = f->real_file_size;
 
                pos = f->last_pos - f->file_offset;
-               if (pos)
-                       pos += td->o.ddir_seq_add;
+               if (pos && o->ddir_seq_add) {
+                       pos += o->ddir_seq_add;
+
+                       /*
+                        * If we reach beyond the end of the file
+                        * with holed IO, wrap around to the
+                        * beginning again.
+                        */
+                       if (pos >= f->real_file_size)
+                               pos = f->file_offset;
+               }
 
                *offset = pos;
                return 0;
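
A rough standalone sketch of the stride-and-wrap behaviour introduced above (simplified names, the file assumed to start at offset 0; not fio's own types or RNG): every block after the first is pushed forward by the configured ddir_seq_add hole, and once that carries the position past the end of the file it wraps back to the file's starting offset.

        #include <stdint.h>
        #include <stdio.h>

        /* Illustrative model of the hole/stride step above, not fio code. */
        static uint64_t next_seq_pos(uint64_t last_pos, uint64_t file_offset,
                                     uint64_t real_file_size, uint64_t seq_add)
        {
                uint64_t pos = last_pos - file_offset;

                if (pos && seq_add) {
                        pos += seq_add;
                        /* holed IO that runs past the end wraps to the start */
                        if (pos >= real_file_size)
                                pos = file_offset;
                }
                return pos;
        }

        int main(void)
        {
                uint64_t pos = 0;
                const uint64_t size = 256 * 1024;       /* 256 KiB file at offset 0 */

                for (int i = 0; i < 5; i++) {
                        printf("offset %llu\n", (unsigned long long) pos);
                        /* 4 KiB blocks with a 64 KiB hole between them */
                        pos = next_seq_pos(pos + 4096, 0, size, 64 * 1024);
                }
                return 0;
        }
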
@@ -748,9 +760,17 @@ static int fill_io_u(struct thread_data *td, struct io_u *io_u)
         * See if it's time to switch to a new zone
         */
        if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
+               struct fio_file *f = io_u->file;
+
                td->zone_bytes = 0;
-               io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
-               io_u->file->last_pos = io_u->file->file_offset;
+               f->file_offset += td->o.zone_range + td->o.zone_skip;
+
+               /*
+                * Wrap from the beginning, if we exceed the file size
+                */
+               if (f->file_offset >= f->real_file_size)
+                               f->file_offset = get_start_offset(td, f);
+               f->last_pos = f->file_offset;
                td->io_skip_bytes += td->o.zone_skip;
        }
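
The zone-skip change above follows the same pattern; a minimal sketch, assuming the wrap simply resets to the file's start offset (names and sizes are illustrative, not fio's API):

        #include <stdint.h>
        #include <stdio.h>

        /* Illustrative only: advance the file offset by zone_range + zone_skip
         * once a zone's worth of IO is done, wrapping back to the start offset
         * when the next zone would begin past the end of the file. */
        static uint64_t next_zone_start(uint64_t file_offset, uint64_t start_offset,
                                        uint64_t real_file_size,
                                        uint64_t zone_range, uint64_t zone_skip)
        {
                file_offset += zone_range + zone_skip;
                if (file_offset >= real_file_size)
                        file_offset = start_offset;
                return file_offset;
        }

        int main(void)
        {
                uint64_t off = 0;

                /* 1 MiB file: 256 KiB zones with 256 KiB skipped between them */
                for (int i = 0; i < 4; i++) {
                        printf("zone starts at %llu\n", (unsigned long long) off);
                        off = next_zone_start(off, 0, 1 << 20, 256 * 1024, 256 * 1024);
                }
                return 0;
        }
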
 
@@ -1281,7 +1301,7 @@ void lat_target_check(struct thread_data *td)
  * If latency target is enabled, we might be ramping up or down and not
  * using the full queue depth available.
  */
-int queue_full(struct thread_data *td)
+int queue_full(const struct thread_data *td)
 {
        const int qempty = io_u_qempty(&td->io_u_freelist);
 
@@ -1487,7 +1507,8 @@ struct io_u *get_io_u(struct thread_data *td)
                if (io_u->ddir == DDIR_WRITE) {
                        if (td->flags & TD_F_REFILL_BUFFERS) {
                                io_u_fill_buffer(td, io_u,
-                                       io_u->xfer_buflen, io_u->xfer_buflen);
+                                       td->o.min_bs[DDIR_WRITE],
+                                       io_u->xfer_buflen);
                        } else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
                                   !(td->flags & TD_F_COMPRESS))
                                do_scramble = 1;
@@ -1828,6 +1849,34 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
        }
 }
 
+/*
+ * See if we should reuse the last seed, if dedupe is enabled
+ */
+static struct frand_state *get_buf_state(struct thread_data *td)
+{
+       unsigned int v;
+       unsigned long r;
+
+       if (!td->o.dedupe_percentage)
+               return &td->buf_state;
+       else if (td->o.dedupe_percentage == 100)
+               return &td->buf_state_prev;
+
+       r = __rand(&td->dedupe_state);
+       v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
+
+       if (v <= td->o.dedupe_percentage)
+               return &td->buf_state_prev;
+
+       return &td->buf_state;
+}
+
+static void save_buf_state(struct thread_data *td, struct frand_state *rs)
+{
+       if (rs == &td->buf_state)
+               frand_copy(&td->buf_state_prev, rs);
+}
+
 void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
                    unsigned int max_bs)
 {
@@ -1835,18 +1884,30 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
                fill_buffer_pattern(td, buf, max_bs);
        else if (!td->o.zero_buffers) {
                unsigned int perc = td->o.compress_percentage;
+               struct frand_state *rs;
+               unsigned int left = max_bs;
+
+               do {
+                       rs = get_buf_state(td);
+
+                       min_write = min(min_write, left);
+
+                       if (perc) {
+                               unsigned int seg = min_write;
 
-               if (perc) {
-                       unsigned int seg = min_write;
+                               seg = min(min_write, td->o.compress_chunk);
+                               if (!seg)
+                                       seg = min_write;
 
-                       seg = min(min_write, td->o.compress_chunk);
-                       if (!seg)
-                               seg = min_write;
+                               fill_random_buf_percentage(rs, buf, perc, seg,
+                                                               min_write);
+                       } else
+                               fill_random_buf(rs, buf, min_write);
 
-                       fill_random_buf_percentage(&td->buf_state, buf,
-                                               perc, seg, max_bs);
-               } else
-                       fill_random_buf(&td->buf_state, buf, max_bs);
+                       buf += min_write;
+                       left -= min_write;
+                       save_buf_state(td, rs);
+               } while (left);
        } else
                memset(buf, 0, max_bs);
 }
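
The reworked fill loop walks the buffer in min_write-sized chunks rather than filling max_bs in one go, so each smallest-block-sized region is generated as an independent unit that later writes can dedupe or compress consistently. A minimal sketch of just that chunking, with the random/percentage fill itself elided (illustrative only, not fio code):

        #include <stdio.h>

        /* Walk a max_bs buffer in min_write-sized chunks, clamping the last
         * chunk to whatever is left; the real code fills each chunk with
         * fill_random_buf() or fill_random_buf_percentage(). */
        static void fill_in_chunks(unsigned int min_write, unsigned int max_bs)
        {
                unsigned int left = max_bs, offset = 0;

                do {
                        unsigned int this_write = min_write < left ? min_write : left;

                        printf("fill %u bytes at buffer offset %u\n", this_write, offset);
                        offset += this_write;
                        left -= this_write;
                } while (left);
        }

        int main(void)
        {
                fill_in_chunks(4096, 16384);    /* four 4k chunks for a 16k buffer */
                return 0;
        }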