Add rw_sequencer option
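
The per-direction sequence trigger (the nr in rw=<mode>:<nr>, renamed
here from ddir_nr to ddir_seq_nr) used to mean one thing only: for
random workloads, generate a new random offset every nr IOs and fill
the gaps sequentially. This change makes the in-between policy
selectable via td->o.rw_seq: RW_SEQ_SEQ keeps the old sequential fill,
while RW_SEQ_IDENT reissues the previous start offset. IO_U_F_BUSY_OK
marks io_us produced by the non-trigger paths so the random map code
tolerates re-hitting blocks that are already marked.

A hedged job-file sketch; the option value names are assumed from the
RW_SEQ_* enum, since the option-parsing side is not part of this diff:

    ; new random offset every 8 reads, sequential fill in between
    [rw-seq-example]
    rw=randread:8
    rw_sequencer=sequential
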
diff --git a/io_u.c b/io_u.c
index 23037f1a8c227114bbcf90b8c0372d8ca5ce108d..3eeade2647a19a260d6a5a84571704699481be18 100644
--- a/io_u.c
+++ b/io_u.c
@@ -41,10 +41,12 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
        struct fio_file *f = io_u->file;
        unsigned long long block;
        unsigned int blocks, nr_blocks;
+       int busy_check;
 
        block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
        nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
        blocks = 0;
+       busy_check = !(io_u->flags & IO_U_F_BUSY_OK);
 
        while (nr_blocks) {
                unsigned int this_blocks, mask;
@@ -54,11 +56,12 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
                 * If we have a mixed random workload, we may
                 * encounter blocks we already did IO to.
                 */
-               if ((td->o.ddir_nr == 1) && !random_map_free(f, block)) {
-                       if (!blocks)
-                               blocks = 1;
+               if (!busy_check) {
+                       blocks = nr_blocks;
                        break;
                }
+               if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block))
+                       break;
 
                idx = RAND_MAP_IDX(f, block);
                bit = RAND_MAP_BIT(f, block);
@@ -69,10 +72,20 @@ static void mark_random_map(struct thread_data *td, struct io_u *io_u)
                if (this_blocks + bit > BLOCKS_PER_MAP)
                        this_blocks = BLOCKS_PER_MAP - bit;
 
-               if (this_blocks == BLOCKS_PER_MAP)
-                       mask = -1U;
-               else
-                       mask = ((1U << this_blocks) - 1) << bit;
+               do {
+                       if (this_blocks == BLOCKS_PER_MAP)
+                               mask = -1U;
+                       else
+                               mask = ((1U << this_blocks) - 1) << bit;
+
+                       if (!(f->file_map[idx] & mask))
+                               break;
+
+                       this_blocks--;
+               } while (this_blocks);
+
+               if (!this_blocks)
+                       break;
 
                f->file_map[idx] |= mask;
                nr_blocks -= this_blocks;
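
Two changes in mark_random_map() here. When the io_u carries
IO_U_F_BUSY_OK (set in get_next_block() below for the non-trigger
sequencer paths), the map walk is skipped outright and blocks is set
so the accounting after the loop still adds up; re-hitting marked
blocks is expected for replayed offsets. And rather than OR-ing the
mask in unconditionally, the new do/while shrinks this_blocks until
the mask covers only free bits, cutting the marking short at the
first already-used block. A standalone sketch of the mask arithmetic,
assuming BLOCKS_PER_MAP is 32 (implied by the -1U full-word case):

    #include <stdio.h>

    #define BLOCKS_PER_MAP 32

    int main(void)
    {
            unsigned int bit = 30, this_blocks = 4, mask;

            /* clamp the run so it stays within this map word */
            if (this_blocks + bit > BLOCKS_PER_MAP)
                    this_blocks = BLOCKS_PER_MAP - bit;     /* -> 2 */

            if (this_blocks == BLOCKS_PER_MAP)
                    mask = -1U;
            else
                    mask = ((1U << this_blocks) - 1) << bit;

            printf("mask = 0x%08x\n", mask);        /* 0xc0000000 */
            return 0;
    }
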
@@ -183,6 +196,62 @@ static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
        return get_next_free_block(td, f, ddir, b);
 }
 
+static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
+                              enum fio_ddir ddir, unsigned long long *b)
+{
+       if (get_next_rand_offset(td, f, ddir, b)) {
+               dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
+                               f->file_name, f->last_pos, f->real_file_size);
+               return 1;
+       }
+
+       return 0;
+}
+
+static int get_next_seq_block(struct thread_data *td, struct fio_file *f,
+                             enum fio_ddir ddir, unsigned long long *b)
+{
+       if (f->last_pos < f->real_file_size) {
+               *b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
+               return 0;
+       }
+
+       return 1;
+}
+
+static int get_next_block(struct thread_data *td, struct io_u *io_u,
+                         enum fio_ddir ddir, int rw_seq, unsigned long long *b)
+{
+       struct fio_file *f = io_u->file;
+       int ret;
+
+       if (rw_seq) {
+               if (td_random(td))
+                       ret = get_next_rand_block(td, f, ddir, b);
+               else
+                       ret = get_next_seq_block(td, f, ddir, b);
+       } else {
+               io_u->flags |= IO_U_F_BUSY_OK;
+
+               if (td->o.rw_seq == RW_SEQ_SEQ) {
+                       ret = get_next_seq_block(td, f, ddir, b);
+                       if (ret)
+                               ret = get_next_rand_block(td, f, ddir, b);
+               } else if (td->o.rw_seq == RW_SEQ_IDENT) {
+                       if (f->last_start != -1ULL)
+                               *b = (f->last_start - f->file_offset) / td->o.min_bs[ddir];
+                       else
+                               *b = 0;
+                       ret = 0;
+               } else {
+                       log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
+                       ret = 1;
+               }
+       }
+
+       return ret;
+}
+
 /*
  * For random io, generate a random new block and see if it's used. Repeat
  * until we find a free one. For sequential io, just return the end of
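
RW_SEQ_SEQ and RW_SEQ_IDENT, along with the rw_seq and ddir_seq_nr
option fields, belong to the header side of this change, which this
page does not show. A minimal sketch of the assumed definitions (only
the names are confirmed by this diff; the values are a guess):

    /* assumed fio.h additions, values hypothetical */
    enum {
            RW_SEQ_SEQ = 0,     /* fill between triggers sequentially */
            RW_SEQ_IDENT,       /* reissue the previous start offset */
    };

Note also that get_next_seq_block() returns failure at EOF, which the
RW_SEQ_SEQ path converts into a fresh random offset via
get_next_rand_block(), so a random job with a sequential fill keeps
going after hitting the end of the file.
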
@@ -193,26 +262,16 @@ static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
        struct fio_file *f = io_u->file;
        unsigned long long b;
        enum fio_ddir ddir = io_u->ddir;
+       int rw_seq_hit = 0;
 
-       if (td_random(td) && (td->o.ddir_nr && !--td->ddir_nr)) {
-               td->ddir_nr = td->o.ddir_nr;
+       if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
+               rw_seq_hit = 1;
+               td->ddir_seq_nr = td->o.ddir_seq_nr;
+       }
 
-               if (get_next_rand_offset(td, f, ddir, &b)) {
-                       dprint(FD_IO, "%s: getting rand offset failed\n",
-                               f->file_name);
-                       return 1;
-               }
-       } else {
-               if (f->last_pos >= f->real_file_size) {
-                       if (!td_random(td) ||
-                            get_next_rand_offset(td, f, ddir, &b)) {
-                               dprint(FD_IO, "%s: pos %llu > size %llu\n",
-                                               f->file_name, f->last_pos,
-                                               f->real_file_size);
-                               return 1;
-                       }
-               } else
-                       b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
+       if (get_next_block(td, io_u, ddir, rw_seq_hit, &b)) {
+               dprint(FD_IO, "%s: failed getting next block\n", f->file_name);
+               return 1;
        }
 
        io_u->offset = b * td->o.ba[ddir];
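
The trigger counter now ticks for every workload; the old code only
decremented ddir_nr under td_random(). For a purely sequential job the
behavior is unchanged, since both the trigger path and RW_SEQ_SEQ end
up in get_next_seq_block(), but the counter state is now maintained
consistently regardless of workload type. The block-to-offset mapping
is untouched: with ba[ddir] defaulting to the minimum block size (say
4096), block b = 10 lands at offset 40960.
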
@@ -926,6 +985,24 @@ struct io_u *get_io_u(struct thread_data *td)
                return NULL;
        }
 
+       if (td->o.verify_backlog && td->io_hist_len) {
+               int get_verify = 0;
+
+               if (td->verify_batch) {
+                       td->verify_batch--;
+                       get_verify = 1;
+               } else if (!(td->io_hist_len % td->o.verify_backlog) &&
+                        td->last_ddir != DDIR_READ) {
+                       td->verify_batch = td->o.verify_batch;
+                       if (!td->verify_batch)
+                               td->verify_batch = td->o.verify_backlog;
+                       get_verify = 1;
+               }
+
+               if (get_verify && !get_next_verify(td, io_u))
+                       goto out;
+       }
+
        /*
         * from a requeue, io_u already setup
         */
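
The verify_backlog hunk drains pending write history in batches:
whenever io_hist_len reaches a multiple of verify_backlog (and the
last completed IO was not itself a read), the next verify_batch io_us
come from get_next_verify() instead of the normal offset engine, with
verify_batch defaulting to the full backlog. For example,
verify_backlog=16 with verify_batch=4 issues 4 verify reads each time
another 16 writes have been logged; leave verify_batch unset and all
16 are verified in one burst. The last_ddir guard (recorded in
io_completed() further down) keeps the modulo test from re-firing
while a batch is still draining.
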
@@ -952,12 +1029,20 @@ struct io_u *get_io_u(struct thread_data *td)
                        goto err_put;
                }
 
+               f->last_start = io_u->offset;
                f->last_pos = io_u->offset + io_u->buflen;
 
                if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_WRITE)
                        populate_verify_io_u(td, io_u);
                else if (td->o.refill_buffers && io_u->ddir == DDIR_WRITE)
                        io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
+               else if (io_u->ddir == DDIR_READ) {
+                       /*
+                        * Reset the buf_filled parameters so next time if the
+                        * buffer is used for writes it is refilled.
+                        */
+                       io_u->buf_filled_len = 0;
+               }
        }
 
        /*
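
Two bookkeeping additions in get_io_u(): f->last_start records the
start offset of the most recent IO, which is precisely what the
RW_SEQ_IDENT path replays (-1ULL presumably being the not-yet-touched
sentinel set at file open, given the check in get_next_block()); and
buffers used for reads get buf_filled_len cleared, so a buffer dirtied
by read data is refilled before being reused for a write under
refill_buffers.
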
@@ -1010,7 +1095,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
 
        td_io_u_lock(td);
        assert(io_u->flags & IO_U_F_FLIGHT);
-       io_u->flags &= ~IO_U_F_FLIGHT;
+       io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
        td_io_u_unlock(td);
 
        if (ddir_sync(io_u->ddir)) {
@@ -1024,6 +1109,7 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
        }
 
        td->last_was_sync = 0;
+       td->last_ddir = io_u->ddir;
 
        if (!io_u->error) {
                unsigned int bytes = io_u->buflen - io_u->resid;
@@ -1053,7 +1139,13 @@ static void io_completed(struct thread_data *td, struct io_u *io_u,
                        if (!td->o.disable_clat || !td->o.disable_bw)
                                lusec = utime_since(&io_u->issue_time,
                                                        &icd->time);
+                       if (!td->o.disable_lat) {
+                               unsigned long tusec;
 
+                               tusec = utime_since(&io_u->start_time,
+                                                       &icd->time);
+                               add_lat_sample(td, idx, tusec, bytes);
+                       }
                        if (!td->o.disable_clat) {
                                add_clat_sample(td, idx, lusec, bytes);
                                io_u_mark_latency(td, lusec);
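
This completes fio's latency triple. In terms of the io_u timestamps
used here and just above:

    slat = issue_time - start_time   (submission latency)
    clat = completion - issue_time   (lusec above)
    lat  = completion - start_time   (the new tusec sample)

so, up to clock rounding, lat = slat + clat. Like the other two, the
new sample is gated by its own disable_lat knob.
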
@@ -1214,20 +1306,8 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
 void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
                      unsigned int max_bs)
 {
-       long *ptr = io_u->buf;
-
-       if (!td->o.zero_buffers) {
-               unsigned long r = __rand(&__fio_rand_state);
-
-               if (sizeof(int) != sizeof(*ptr))
-                       r *= (unsigned long) __rand(&__fio_rand_state);
-
-               while ((void *) ptr - io_u->buf < max_bs) {
-                       *ptr = r;
-                       ptr++;
-                       r *= GOLDEN_RATIO_PRIME;
-                       r >>= 3;
-               }
-       } else
-               memset(ptr, 0, max_bs);
+       if (!td->o.zero_buffers)
+               fill_random_buf(io_u->buf, max_bs);
+       else
+               memset(io_u->buf, 0, max_bs);
 }
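
The open-coded fill loop moves into a shared fill_random_buf() helper
(defined elsewhere, not on this page). Based on the removed lines, a
standalone sketch of the pattern it centralizes; rand() stands in for
fio's seeded __rand() state, and the GOLDEN_RATIO_PRIME value is the
usual 64-bit constant, assumed here since the arch headers are not
part of this diff:

    #include <stdio.h>
    #include <stdlib.h>

    #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL   /* assumes LP64 */

    static void fill_random_buf(void *buf, unsigned int max_bs)
    {
            long *ptr = buf;
            unsigned long r = rand();   /* fio uses its own PRNG state */

            while ((void *) ptr - buf < max_bs) {
                    *ptr = r;           /* cheap, repeatable pseudo-random fill */
                    ptr++;
                    r *= GOLDEN_RATIO_PRIME;
                    r >>= 3;
            }
    }

    int main(void)
    {
            unsigned char buf[64];

            srand(1);
            fill_random_buf(buf, sizeof(buf));
            for (int i = 0; i < 16; i++)
                    printf("%02x", buf[i]);
            printf("\n");
            return 0;
    }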