Include file name in offset failure
[fio.git] / io_u.c
diff --git a/io_u.c b/io_u.c
index 4d3116b7fd4fc0901f4419e8690270c2c18a2d43..f451d1a756141e3a29eaad8f3f2fb74d113ae82e 100644
--- a/io_u.c
+++ b/io_u.c
@@ -54,11 +54,8 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
                 * If we have a mixed random workload, we may
                 * encounter blocks we already did IO to.
                 */
-               if ((td->o.ddir_nr == 1) && !random_map_free(f, block)) {
-                       if (!blocks)
-                               blocks = 1;
+               if ((td->o.ddir_nr == 1) && !random_map_free(f, block))
                        break;
-               }
 
                idx = RAND_MAP_IDX(f, block);
                bit = RAND_MAP_BIT(f, block);
@@ -69,10 +66,20 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
                if (this_blocks + bit > BLOCKS_PER_MAP)
                        this_blocks = BLOCKS_PER_MAP - bit;
 
-               if (this_blocks == BLOCKS_PER_MAP)
-                       mask = -1U;
-               else
-                       mask = ((1U << this_blocks) - 1) << bit;
+               do {
+                       if (this_blocks == BLOCKS_PER_MAP)
+                               mask = -1U;
+                       else
+                               mask = ((1U << this_blocks) - 1) << bit;
+       
+                       if (!(f->file_map[idx] & mask))
+                               break;
+
+                       this_blocks--;
+               } while (this_blocks);
+
+               if (!this_blocks)
+                       break;
 
                f->file_map[idx] |= mask;
                nr_blocks -= this_blocks;
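
The loop added in this hunk keeps the random-map marking from stomping on blocks that were already completed: it shrinks this_blocks until the mask covers only clear bits in f->file_map[idx], and gives up on this offset (breaking out of the outer loop) if not even a single block is free. Below is a minimal standalone sketch of the mask-shrinking idea with the map word and BLOCKS_PER_MAP reduced to plain parameters; fit_mask is a hypothetical helper for illustration, not fio code.

#include <stdio.h>

#define BLOCKS_PER_MAP	32

/*
 * Return a bit mask for up to this_blocks blocks starting at 'bit',
 * shrunk until it no longer overlaps bits already set in map_word.
 * Returns 0 if nothing starting at 'bit' is free.
 */
static unsigned int fit_mask(unsigned int map_word, unsigned int bit,
			     unsigned int this_blocks)
{
	unsigned int mask;

	while (this_blocks) {
		if (this_blocks == BLOCKS_PER_MAP)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << bit;

		if (!(map_word & mask))
			return mask;	/* every covered block is free */

		this_blocks--;		/* overlap: try a shorter run */
	}
	return 0;
}

int main(void)
{
	/*
	 * Bits 4 and 5 are already done, so a 4-block request at bit 2
	 * shrinks to 2 blocks: mask 0x0c.
	 */
	printf("mask = %#x\n", fit_mask(0x30, 2, 4));
	return 0;
}
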
@@ -926,6 +933,24 @@ struct io_u *get_io_u(struct thread_data *td)
                return NULL;
        }
 
+       if (td->o.verify_backlog && td->io_hist_len) {
+               int get_verify = 0;
+
+               if (td->verify_batch) {
+                       td->verify_batch--;
+                       get_verify = 1;
+               } else if (!(td->io_hist_len % td->o.verify_backlog) &&
+                        td->last_ddir != DDIR_READ) {
+                       td->verify_batch = td->o.verify_batch;
+                       if (!td->verify_batch)
+                               td->verify_batch = td->o.verify_backlog;
+                       get_verify = 1;
+               }
+
+               if (get_verify && !get_next_verify(td, io_u))
+                       goto out;
+       }
+
        /*
         * from a requeue, io_u already setup
         */
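
The get_io_u() addition implements the verify backlog: once the write history has grown to a multiple of o.verify_backlog and the previous I/O was not a read, a batch of td->verify_batch verify reads (defaulting to the backlog size when o.verify_batch is zero) is drained from the history via get_next_verify() before new I/O is generated. Below is a standalone sketch of just that batching decision, with the relevant thread_data fields collapsed into a hypothetical verify_state struct; want_verify() is illustration, not fio code.

#include <stdbool.h>
#include <stdio.h>

struct verify_state {
	unsigned int io_hist_len;	/* writes waiting to be verified */
	unsigned int backlog;		/* mirrors o.verify_backlog */
	unsigned int batch;		/* mirrors o.verify_batch */
	unsigned int verify_batch;	/* reads left in the current batch */
	int last_ddir;			/* 0 = read, 1 = write (assumed) */
};

/* Should the next io_u be a verify read instead of fresh I/O? */
static bool want_verify(struct verify_state *s)
{
	if (!s->backlog || !s->io_hist_len)
		return false;

	if (s->verify_batch) {
		s->verify_batch--;
		return true;
	}
	if (!(s->io_hist_len % s->backlog) && s->last_ddir != 0) {
		/* backlog threshold hit: start a new batch */
		s->verify_batch = s->batch ? s->batch : s->backlog;
		return true;
	}
	return false;
}

int main(void)
{
	struct verify_state s = { .io_hist_len = 8, .backlog = 4,
				  .batch = 0, .last_ddir = 1 };

	/* 8 pending writes, backlog of 4, last I/O was a write: verify now. */
	printf("verify now? %d\n", want_verify(&s));
	return 0;
}
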
@@ -1024,6 +1049,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
        }
 
        td->last_was_sync = 0;
+       td->last_ddir = io_u->ddir;
 
        if (!io_u->error) {
                unsigned int bytes = io_u->buflen - io_u->resid;
@@ -1217,9 +1243,16 @@ void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
        long *ptr = io_u->buf;
 
        if (!td->o.zero_buffers) {
+               unsigned long r = __rand(&__fio_rand_state);
+
+               if (sizeof(int) != sizeof(*ptr))
+                       r *= (unsigned long) __rand(&__fio_rand_state);
+
                while ((void *) ptr - io_u->buf < max_bs) {
-                       *ptr = __rand(&__fio_rand_state);
+                       *ptr = r;
                        ptr++;
+                       r *= GOLDEN_RATIO_PRIME;
+                       r >>= 3;
                }
        } else
                memset(ptr, 0, max_bs);
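
With this change io_u_fill_buffer() calls __rand() only once (twice when long is wider than int, to widen the seed) and derives the remaining words by a multiply with GOLDEN_RATIO_PRIME plus a shift, which is far cheaper than one PRNG call per word while still producing data that does not compress or deduplicate trivially. Below is a standalone sketch of the same fill pattern; SCRAMBLE_PRIME is the Linux kernel's 64-bit golden-ratio prime and stands in for fio's GOLDEN_RATIO_PRIME, whose exact value depends on the build, and fill_random_buf is illustration, not fio code.

#include <stdio.h>

#define SCRAMBLE_PRIME	0x9e37fffffffc0001UL	/* assumed 64-bit value */

/* Fill 'len' bytes of buf with cheap pseudo-random words from one seed. */
static void fill_random_buf(void *buf, unsigned int len, unsigned long seed)
{
	unsigned long r = seed;
	long *ptr = buf;

	while ((void *) ptr - buf < len) {
		*ptr = r;
		ptr++;
		r *= SCRAMBLE_PRIME;	/* scramble instead of re-seeding */
		r >>= 3;
	}
}

int main(void)
{
	unsigned char buf[64];

	fill_random_buf(buf, sizeof(buf), 0x12345678UL);
	printf("first byte: %#x\n", buf[0]);
	return 0;
}
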