t/io_uring: support using preadv2
[fio.git] / dedupe.c
index 043a376c6757641b5f2817c21b6d9b819e037bbb..fd116dfba4933396ee6a928dd87866d417cf67a1 100644 (file)
--- a/dedupe.c
+++ b/dedupe.c
@@ -2,12 +2,14 @@
 
 int init_dedupe_working_set_seeds(struct thread_data *td)
 {
-       unsigned long long i;
+       unsigned long long i, j, num_seed_advancements;
        struct frand_state dedupe_working_set_state = {0};
 
        if (!td->o.dedupe_percentage || !(td->o.dedupe_mode == DEDUPE_MODE_WORKING_SET))
                return 0;
 
+       num_seed_advancements = td->o.min_bs[DDIR_WRITE] /
+               min_not_zero(td->o.min_bs[DDIR_WRITE], (unsigned long long) td->o.compress_chunk);
        /*
         * The dedupe working set keeps seeds of unique data (generated by buf_state).
         * Dedupe-ed pages will be generated using those seeds.
@@ -21,7 +23,13 @@ int init_dedupe_working_set_seeds(struct thread_data *td)
        frand_copy(&dedupe_working_set_state, &td->buf_state);
        for (i = 0; i < td->num_unique_pages; i++) {
                frand_copy(&td->dedupe_working_set_states[i], &dedupe_working_set_state);
-               __get_next_seed(&dedupe_working_set_state);
+       /*
+        * When compression is enabled, generating a buffer advances the
+        * seed multiple times (once per compress_chunk within min_bs).
+        * Advance the seed by the same amount here so that deduping
+        * against this page regenerates the identical buffer.
+        */
+               for (j = 0; j < num_seed_advancements; j++)
+                       __get_next_seed(&dedupe_working_set_state);
        }
 
        return 0;