Build t/ tools by default
[fio.git] / io_u.c
diff --git a/io_u.c b/io_u.c
index ba192a32a985d88e87378d196404cddd10e93936..eac871bfe9d91890e70e9110bd575e9a725b1a4c 100644 (file)
--- a/io_u.c
+++ b/io_u.c
@@ -1528,21 +1528,16 @@ err_put:
 void io_u_log_error(struct thread_data *td, struct io_u *io_u)
 {
        enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
-       const char *msg[] = { "read", "write", "sync", "datasync",
-                               "sync_file_range", "wait", "trim" };
 
        if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
                return;
 
-       log_err("fio: io_u error");
-
-       if (io_u->file)
-               log_err(" on file %s", io_u->file->file_name);
-
-       log_err(": %s\n", strerror(io_u->error));
-
-       log_err("     %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
-                                       io_u->offset, io_u->xfer_buflen);
+       log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
+               io_u->file ? " on file " : "",
+               io_u->file ? io_u->file->file_name : "",
+               strerror(io_u->error),
+               io_ddir_name(io_u->ddir),
+               io_u->offset, io_u->xfer_buflen);
 
        if (!td->error)
                td_verror(td, io_u->error, "io_u error");
@@ -1792,6 +1787,8 @@ int io_u_queued_complete(struct thread_data *td, int min_evts,
 
        if (!min_evts)
                tvp = &ts;
+       else if (min_evts > td->cur_depth)
+               min_evts = td->cur_depth;
 
        ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
        if (ret < 0) {
@@ -1831,6 +1828,34 @@ void io_u_queued(struct thread_data *td, struct io_u *io_u)
        }
 }
 
+/*
+ * See if we should reuse the last seed, if dedupe is enabled
+ */
+static struct frand_state *get_buf_state(struct thread_data *td)
+{
+       unsigned int v;
+       unsigned long r;
+
+       if (!td->o.dedupe_percentage)
+               return &td->buf_state;
+       else if (td->o.dedupe_percentage == 100)
+               return &td->buf_state_prev;
+
+       r = __rand(&td->dedupe_state);
+       v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
+
+       if (v <= td->o.dedupe_percentage)
+               return &td->buf_state_prev;
+
+       return &td->buf_state;
+}
+
+static void save_buf_state(struct thread_data *td, struct frand_state *rs)
+{
+       if (rs == &td->buf_state)
+               frand_copy(&td->buf_state_prev, rs);
+}
+
 void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
                    unsigned int max_bs)
 {
@@ -1838,6 +1863,9 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
                fill_buffer_pattern(td, buf, max_bs);
        else if (!td->o.zero_buffers) {
                unsigned int perc = td->o.compress_percentage;
+               struct frand_state *rs;
+
+               rs = get_buf_state(td);
 
                if (perc) {
                        unsigned int seg = min_write;
@@ -1846,10 +1874,12 @@ void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
                        if (!seg)
                                seg = min_write;
 
-                       fill_random_buf_percentage(&td->buf_state, buf,
-                                               perc, seg, max_bs);
-               } else
-                       fill_random_buf(&td->buf_state, buf, max_bs);
+                       fill_random_buf_percentage(rs, buf, perc, seg, max_bs);
+                       save_buf_state(td, rs);
+               } else {
+                       fill_random_buf(rs, buf, max_bs);
+                       save_buf_state(td, rs);
+               }
        } else
                memset(buf, 0, max_bs);
 }