Default verify backlog batch to verify backlog setting if not given
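Assuming the job-file option names verify_backlog and verify_backlog_batch from the fio HOWTO, the defaulting this commit introduces can be exercised with a minimal job like the sketch below: with the batch size left unset, the whole backlog is now verified at each checkpoint.

	[write-and-verify]
	rw=write
	verify=md5
	verify_backlog=1024
	# verify_backlog_batch left unset: with this change it defaults
	# to verify_backlog (1024), so the entire backlog is verified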
diff --git a/io_u.c b/io_u.c
index 9b9570eaee1fa925e3f9494c5615210a42b92b7e..bffacb404e860864b51e7c89b443eb0a54cb15b6 100644
--- a/io_u.c
+++ b/io_u.c
@@ -8,6 +8,7 @@
 #include "fio.h"
 #include "hash.h"
 #include "verify.h"
+#include "lib/rand.h"
 
 struct io_completion_data {
        int nr;                         /* input */
@@ -394,6 +395,14 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
             td->io_issues[DDIR_WRITE] && should_fsync(td))
                return DDIR_DATASYNC;
 
+       /*
+        * see if it's time to sync_file_range
+        */
+       if (td->sync_file_range_nr &&
+          !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
+            td->io_issues[DDIR_WRITE] && should_fsync(td))
+               return DDIR_SYNC_FILE_RANGE;
+
        if (td_rw(td)) {
                /*
                 * Check if it's time to seed a new data direction.
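The new branch paces sync_file_range() exactly like the fsync/fdatasync branches above: a sync is slotted in once every sync_file_range_nr writes, and never before the first write has gone out. Ignoring the should_fsync() gate, the counting rule reduces to this hypothetical sketch (should_sync_now, writes_issued and sync_every are illustrative names, not fio identifiers):

	/* Sketch of the "sync every N writes" pacing used above. */
	static int should_sync_now(unsigned long writes_issued,
				   unsigned int sync_every)
	{
		if (!sync_every || !writes_issued)
			return 0;	/* disabled, or nothing written yet */
		return (writes_issued % sync_every) == 0;
	}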
@@ -880,14 +889,6 @@ again:
                io_u->end_io = NULL;
        }
 
-       /*
-        * We ran out, wait for async verify threads to finish and return one
-        */
-       if (!io_u && td->o.verify_async) {
-               pthread_cond_wait(&td->free_cond, &td->io_u_lock);
-               goto again;
-       }
-
        if (io_u) {
                assert(io_u->flags & IO_U_F_FREE);
                io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
@@ -897,6 +898,13 @@ again:
                flist_add(&io_u->list, &td->io_u_busylist);
                td->cur_depth++;
                io_u->flags |= IO_U_F_IN_CUR_DEPTH;
+       } else if (td->o.verify_async) {
+               /*
+                * We ran out, wait for async verify threads to finish and
+                * return one
+                */
+               pthread_cond_wait(&td->free_cond, &td->io_u_lock);
+               goto again;
        }
 
        td_io_u_unlock(td);
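The empty-pool wait moves from its own early check into the else branch of the allocation test; the behavior is unchanged, and the goto again above is the usual condition-variable idiom of re-checking the predicate after every wakeup. A self-contained sketch of that idiom, with hypothetical names (pool_lock, pool_cond, pool_pop):

	/* Sketch of the wait-and-recheck idiom retained above. */
	#include <pthread.h>

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t pool_cond = PTHREAD_COND_INITIALIZER;

	static void *pool_get(void *(*pool_pop)(void))
	{
		void *item;

		pthread_mutex_lock(&pool_lock);
		while ((item = pool_pop()) == NULL)
			/* Drops pool_lock while asleep, holds it on return. */
			pthread_cond_wait(&pool_cond, &pool_lock);
		pthread_mutex_unlock(&pool_lock);
		return item;
	}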
@@ -918,6 +926,24 @@ struct io_u *get_io_u(struct thread_data *td)
                return NULL;
        }
 
+       if (td->o.verify_backlog && td->io_hist_len) {
+               int get_verify = 0;
+
+               if (td->verify_batch) {
+                       td->verify_batch--;
+                       get_verify = 1;
+               } else if (!(td->io_hist_len % td->o.verify_backlog) &&
+                        td->last_ddir != DDIR_READ) {
+                       td->verify_batch = td->o.verify_batch;
+                       if (!td->verify_batch)
+                               td->verify_batch = td->o.verify_backlog;
+                       get_verify = 1;
+               }
+
+               if (get_verify && !get_next_verify(td, io_u))
+                       goto out;
+       }
+
        /*
         * from a requeue, io_u already setup
         */
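This hunk carries the change named in the commit title: once io_hist_len hits a multiple of verify_backlog, a batch of pending verifies is issued, and an unset verify_batch now defaults to verify_backlog itself, so the entire backlog is verified at each checkpoint. (td->last_ddir, recorded at completion time further down, keeps the verify reads themselves from re-triggering the checkpoint.) The decision reduces to this hypothetical sketch, with stand-in names for the td fields:

	/* Sketch of the backlog/batch decision above. */
	static int want_verify(unsigned long hist_len, unsigned int backlog,
			       unsigned int user_batch,
			       unsigned int *batch_left, int last_was_read)
	{
		if (*batch_left) {
			(*batch_left)--;
			return 1;
		}
		if (backlog && !(hist_len % backlog) && !last_was_read) {
			/* Unset batch size defaults to the full backlog. */
			*batch_left = user_batch ? user_batch : backlog;
			return 1;
		}
		return 0;
	}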
@@ -996,6 +1022,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
         * initialized, silence that warning.
         */
        unsigned long uninitialized_var(usec);
+       struct fio_file *f;
 
        dprint_io_u(io_u, "io complete");
 
@@ -1006,10 +1033,16 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 
        if (ddir_sync(io_u->ddir)) {
                td->last_was_sync = 1;
+               f = io_u->file;
+               if (f) {
+                       f->first_write = -1ULL;
+                       f->last_write = -1ULL;
+               }
                return;
        }
 
        td->last_was_sync = 0;
+       td->last_ddir = io_u->ddir;
 
        if (!io_u->error) {
                unsigned int bytes = io_u->buflen - io_u->resid;
@@ -1021,6 +1054,18 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
                td->io_bytes[idx] += bytes;
                td->this_io_bytes[idx] += bytes;
 
+               if (idx == DDIR_WRITE) {
+                       f = io_u->file;
+                       if (f) {
+                               if (f->first_write == -1ULL ||
+                                   io_u->offset < f->first_write)
+                                       f->first_write = io_u->offset;
+                               if (f->last_write == -1ULL ||
+                                   ((io_u->offset + bytes) > f->last_write))
+                                       f->last_write = io_u->offset + bytes;
+                       }
+               }
+
                if (ramp_time_over(td)) {
                        unsigned long uninitialized_var(lusec);
 
@@ -1191,9 +1236,16 @@ void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
        long *ptr = io_u->buf;
 
        if (!td->o.zero_buffers) {
+               unsigned long r = __rand(&__fio_rand_state);
+
+               if (sizeof(int) != sizeof(*ptr))
+                       r *= (unsigned long) __rand(&__fio_rand_state);
+
                while ((void *) ptr - io_u->buf < max_bs) {
-                       *ptr = rand() * GOLDEN_RATIO_PRIME;
+                       *ptr = r;
                        ptr++;
+                       r *= GOLDEN_RATIO_PRIME;
+                       r >>= 3;
                }
        } else
                memset(ptr, 0, max_bs);
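The rewritten fill loop calls fio's own generator once (twice when long is wider than int, to widen the seed) and then derives each successive word from the previous one with a multiply and shift, instead of paying for one libc rand() call per word. The same mixing scheme in isolation; fill_pattern and its arguments are hypothetical, and the constant is the 64-bit golden-ratio prime that fio's hash.h mirrors from the kernel:

	/* Sketch of the multiply-and-shift pattern fill above. */
	#define GOLDEN_RATIO_PRIME_64	0x9e37fffffffc0001ULL

	static void fill_pattern(unsigned long long *buf, size_t nr_words,
				 unsigned long long seed)
	{
		size_t i;

		for (i = 0; i < nr_words; i++) {
			buf[i] = seed;
			seed *= GOLDEN_RATIO_PRIME_64;
			seed >>= 3;
		}
	}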