Reuse filled pattern
diff --git a/io_u.c b/io_u.c
index 9b9570eaee1fa925e3f9494c5615210a42b92b7e..dc4473beab1737660f2ebeb2f1c91418b1a14da6 100644
--- a/io_u.c
+++ b/io_u.c
@@ -8,6 +8,7 @@
 #include "fio.h"
 #include "hash.h"
 #include "verify.h"
+#include "lib/rand.h"
 
 struct io_completion_data {
        int nr;                         /* input */
@@ -53,11 +54,8 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
                 * If we have a mixed random workload, we may
                 * encounter blocks we already did IO to.
                 */
-               if ((td->o.ddir_nr == 1) && !random_map_free(f, block)) {
-                       if (!blocks)
-                               blocks = 1;
+               if ((td->o.ddir_nr == 1) && !random_map_free(f, block))
                        break;
-               }
 
                idx = RAND_MAP_IDX(f, block);
                bit = RAND_MAP_BIT(f, block);
@@ -68,10 +66,20 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
                if (this_blocks + bit > BLOCKS_PER_MAP)
                        this_blocks = BLOCKS_PER_MAP - bit;
 
-               if (this_blocks == BLOCKS_PER_MAP)
-                       mask = -1U;
-               else
-                       mask = ((1U << this_blocks) - 1) << bit;
+               do {
+                       if (this_blocks == BLOCKS_PER_MAP)
+                               mask = -1U;
+                       else
+                               mask = ((1U << this_blocks) - 1) << bit;
+
+                       if (!(f->file_map[idx] & mask))
+                               break;
+
+                       this_blocks--;
+               } while (this_blocks);
+
+               if (!this_blocks)
+                       break;
 
                f->file_map[idx] |= mask;
                nr_blocks -= this_blocks;
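
The new inner loop keeps shrinking this_blocks until the candidate mask no longer overlaps bits already set in the map word, rather than giving up on the whole range. A minimal standalone sketch of that bitmask arithmetic (BLOCKS_PER_MAP and the sample values here are assumptions for illustration, not fio's actual state):

	#include <stdio.h>

	#define BLOCKS_PER_MAP	32	/* blocks tracked per map word */

	static unsigned int block_mask(unsigned int nr, unsigned int bit)
	{
		/*
		 * A full word must be special-cased: (1U << 32) is
		 * undefined behaviour, hence the -1U shortcut above.
		 */
		if (nr == BLOCKS_PER_MAP)
			return -1U;
		return ((1U << nr) - 1) << bit;
	}

	int main(void)
	{
		unsigned int map_word = 0xc0;	/* blocks 6 and 7 already done */
		unsigned int this_blocks = 6, bit = 4;

		/* shrink until the mask no longer collides with set bits */
		while (this_blocks && (map_word & block_mask(this_blocks, bit)))
			this_blocks--;

		printf("can mark %u block(s) starting at bit %u\n",
			this_blocks, bit);
		return 0;
	}

With the values above, the mask shrinks from 6 blocks to 2 before it fits, mirroring how the hunk marks as much of the request as possible instead of breaking out early.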
@@ -394,6 +402,14 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
             td->io_issues[DDIR_WRITE] && should_fsync(td))
                return DDIR_DATASYNC;
 
+       /*
+        * see if it's time to sync_file_range
+        */
+       if (td->sync_file_range_nr &&
+          !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
+            td->io_issues[DDIR_WRITE] && should_fsync(td))
+               return DDIR_SYNC_FILE_RANGE;
+
        if (td_rw(td)) {
                /*
                 * Check if it's time to seed a new data direction.
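
The new branch mirrors the fsync/fdatasync checks just above it: once every sync_file_range_nr issued writes, and only if at least one write has gone out and should_fsync() allows it, a DDIR_SYNC_FILE_RANGE is slotted into the stream. A toy model of that cadence (the threshold of 4 is an arbitrary example, not a fio default):

	#include <stdio.h>

	/* nonzero when a sync should be interleaved, mirroring the
	 * "every Nth write" modulo test in the hunk above */
	static int time_to_sync(unsigned long writes_issued, unsigned int nr)
	{
		return nr && writes_issued && !(writes_issued % nr);
	}

	int main(void)
	{
		unsigned long writes;

		for (writes = 1; writes <= 10; writes++)
			if (time_to_sync(writes, 4))
				printf("sync_file_range after write %lu\n",
					writes);
		return 0;
	}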
@@ -880,14 +896,6 @@ again:
                io_u->end_io = NULL;
        }
 
-       /*
-        * We ran out, wait for async verify threads to finish and return one
-        */
-       if (!io_u && td->o.verify_async) {
-               pthread_cond_wait(&td->free_cond, &td->io_u_lock);
-               goto again;
-       }
-
        if (io_u) {
                assert(io_u->flags & IO_U_F_FREE);
                io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
@@ -897,6 +905,13 @@ again:
                flist_add(&io_u->list, &td->io_u_busylist);
                td->cur_depth++;
                io_u->flags |= IO_U_F_IN_CUR_DEPTH;
+       } else if (td->o.verify_async) {
+               /*
+                * We ran out, wait for async verify threads to finish and
+                * return one
+                */
+               pthread_cond_wait(&td->free_cond, &td->io_u_lock);
+               goto again;
        }
 
        td_io_u_unlock(td);
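
Functionally this is the same wait as before; it has only moved into the else branch so the !io_u test is not repeated. The underlying pattern is the classic condition-variable retry loop, sketched standalone below (the names and the single helper thread are illustrative, not fio's):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t free_cond = PTHREAD_COND_INITIALIZER;
	static int free_slots;

	/* stand-in for an async verify thread handing an io_u back */
	static void *verify_done(void *unused)
	{
		(void) unused;
		pthread_mutex_lock(&lock);
		free_slots++;
		pthread_cond_signal(&free_cond);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, verify_done, NULL);

		pthread_mutex_lock(&lock);
		while (!free_slots)		/* the "goto again" retry */
			pthread_cond_wait(&free_cond, &lock);
		free_slots--;
		pthread_mutex_unlock(&lock);
		pthread_join(t, NULL);

		printf("got a recycled slot\n");
		return 0;
	}

(Build with -pthread.)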
@@ -918,6 +933,24 @@ struct io_u *get_io_u(struct thread_data *td)
                return NULL;
        }
 
+       if (td->o.verify_backlog && td->io_hist_len) {
+               int get_verify = 0;
+
+               if (td->verify_batch) {
+                       td->verify_batch--;
+                       get_verify = 1;
+               } else if (!(td->io_hist_len % td->o.verify_backlog) &&
+                        td->last_ddir != DDIR_READ) {
+                       td->verify_batch = td->o.verify_batch;
+                       if (!td->verify_batch)
+                               td->verify_batch = td->o.verify_backlog;
+                       get_verify = 1;
+               }
+
+               if (get_verify && !get_next_verify(td, io_u))
+                       goto out;
+       }
+
        /*
         * from a requeue, io_u already setup
         */
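
verify_backlog trades memory for verification latency: after every verify_backlog completed writes, a batch of verify_batch pending blocks (defaulting to the backlog size) is read back and verified, instead of deferring all verifies to the end of the job. A simplified simulation of that scheduling follows; the constants and the io_hist_len bookkeeping are illustrative, and in fio it is get_next_verify() that actually consumes history entries:

	#include <stdio.h>

	int main(void)
	{
		unsigned int backlog = 4, batch = 2, verify_batch = 0;
		unsigned int io_hist_len = 0, calls;

		for (calls = 1; calls <= 16; calls++) {
			int get_verify = 0;

			if (verify_batch) {
				verify_batch--;
				get_verify = 1;
			} else if (io_hist_len && !(io_hist_len % backlog)) {
				verify_batch = batch ? batch : backlog;
				get_verify = 1;
			}

			if (get_verify) {
				io_hist_len--;	/* history entry consumed */
				printf("call %2u: verify (hist=%u)\n",
					calls, io_hist_len);
			} else {
				io_hist_len++;	/* another write logged */
				printf("call %2u: write  (hist=%u)\n",
					calls, io_hist_len);
			}
		}
		return 0;
	}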
@@ -950,6 +983,14 @@ struct io_u *get_io_u(struct thread_data *td)
                        populate_verify_io_u(td, io_u);
                else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE)
                        io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
+               else if (io_u->ddir == DDIR_READ) {
+                       /*
+                        * Reset the buf_filled parameters so next time if the
+                        * buffer is used for writes it is refilled.
+                        */
+                       io_u->buf_filled = 0;
+                       io_u->buf_filled_len = 0;
+               }
        }
 
        /*
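
This is the hunk that gives the commit its title: buf_filled and buf_filled_len let a write buffer keep its generated pattern across reuses, so it is only regenerated when something has clobbered it; here, a read landing in the same buffer invalidates the cache. A sketch of the caching idea with hypothetical names (struct buf_cache and the 0x5a fill stand in for fio's io_u fields and its real pattern generator):

	#include <stdio.h>
	#include <string.h>

	struct buf_cache {
		char buf[4096];
		unsigned int buf_filled_len;	/* 0 means pattern invalid */
	};

	static void fill_for_write(struct buf_cache *bc, unsigned int len)
	{
		if (bc->buf_filled_len >= len) {
			printf("reusing cached pattern (%u bytes)\n", len);
			return;
		}
		memset(bc->buf, 0x5a, len);	/* stand-in for the real fill */
		bc->buf_filled_len = len;
		printf("refilled %u bytes\n", len);
	}

	int main(void)
	{
		struct buf_cache bc = { .buf_filled_len = 0 };

		fill_for_write(&bc, 4096);	/* first write: fill */
		fill_for_write(&bc, 4096);	/* second write: reuse */
		bc.buf_filled_len = 0;		/* a read clobbered the buffer */
		fill_for_write(&bc, 4096);	/* must refill */
		return 0;
	}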
@@ -996,6 +1037,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
         * initialized, silence that warning.
         */
        unsigned long uninitialized_var(usec);
+       struct fio_file *f;
 
        dprint_io_u(io_u, "io complete");
 
@@ -1006,10 +1048,16 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 
        if (ddir_sync(io_u->ddir)) {
                td->last_was_sync = 1;
+               f = io_u->file;
+               if (f) {
+                       f->first_write = -1ULL;
+                       f->last_write = -1ULL;
+               }
                return;
        }
 
        td->last_was_sync = 0;
+       td->last_ddir = io_u->ddir;
 
        if (!io_u->error) {
                unsigned int bytes = io_u->buflen - io_u->resid;
@@ -1021,13 +1069,31 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
                td->io_bytes[idx] += bytes;
                td->this_io_bytes[idx] += bytes;
 
+               if (idx == DDIR_WRITE) {
+                       f = io_u->file;
+                       if (f) {
+                               if (f->first_write == -1ULL ||
+                                   io_u->offset < f->first_write)
+                                       f->first_write = io_u->offset;
+                               if (f->last_write == -1ULL ||
+                                   ((io_u->offset + bytes) > f->last_write))
+                                       f->last_write = io_u->offset + bytes;
+                       }
+               }
+
                if (ramp_time_over(td)) {
                        unsigned long uninitialized_var(lusec);
 
                        if (!td->o.disable_clat || !td->o.disable_bw)
                                lusec = utime_since(&io_u->issue_time,
                                                        &icd->time);
+                       if (!td->o.disable_lat) {
+                               unsigned long tusec;
 
+                               tusec = utime_since(&io_u->start_time,
+                                                       &icd->time);
+                               add_lat_sample(td, idx, tusec, bytes);
+                       }
                        if (!td->o.disable_clat) {
                                add_clat_sample(td, idx, lusec, bytes);
                                io_u_mark_latency(td, lusec);
@@ -1188,13 +1254,8 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
 void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
                      unsigned int max_bs)
 {
-       long *ptr = io_u->buf;
-
-       if (!td->o.zero_buffers) {
-               while ((void *) ptr - io_u->buf < max_bs) {
-                       *ptr = rand() * GOLDEN_RATIO_PRIME;
-                       ptr++;
-               }
-       } else
-               memset(ptr, 0, max_bs);
+       if (!td->o.zero_buffers)
+               fill_random_buf(io_u->buf, max_bs);
+       else
+               memset(io_u->buf, 0, max_bs);
 }
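
The open-coded rand() loop is replaced by fill_random_buf() from the new lib/rand.h include (first hunk). A hypothetical stand-in for such a helper is below: seed once, then smear the seed across the buffer with golden-ratio multiplies, far cheaper than one rand() call per word. The constant and the assumption that max_bs is long-aligned are illustrative; fio's own helper uses its internal random generator for the seed:

	#include <stdio.h>
	#include <stdlib.h>

	/* a golden-ratio prime, in the spirit of GOLDEN_RATIO_PRIME */
	#define GR_PRIME	0x9e37fffffffc0001UL

	/* hypothetical stand-in for fill_random_buf(); len is assumed to
	 * be a multiple of sizeof(long), as fio's buffer sizes are */
	static void fill_buf_sketch(void *buf, unsigned int len)
	{
		unsigned long r = rand();
		unsigned long *ptr = buf;

		while ((char *) ptr - (char *) buf < len) {
			*ptr = r;
			r *= GR_PRIME;
			r >>= 3;
			ptr++;
		}
	}

	int main(void)
	{
		unsigned long buf[8];
		unsigned int i;

		fill_buf_sketch(buf, sizeof(buf));
		for (i = 0; i < 8; i++)
			printf("%016lx\n", buf[i]);
		return 0;
	}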