# Writing to two files that share the same set of duplicate blocks.
# The dedupe working set is spread uniformly across the jobs, so
# whenever a job chooses to perform a dedupe operation it regenerates
# a buffer from the shared global space.
# If you test the dedupe ratio on either file by itself, the result
# is likely lower than if you test the ratio of the two files combined.
# Use `./t/fio-dedupe <file> -C 1 -c 1 -b 4096` to measure the total
# data reduction ratio.
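#
# (fio-dedupe scans the file in fixed-size chunks and counts how many are
# unique; the -b 4096 scan size is assumed here to match the 4k blocks the
# jobs write, since a mismatched chunk size would skew the reported ratio.)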
#
# Full example of the test:
# $ ./fio ./examples/dedupe-global.fio
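#
# (With fio's default filename_format of $jobname.$jobnum.$filenum, the two
# write jobs produce the a.0.0 and b.0.0 files checked below.)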
#
# Checking ratio on a and b individually:
# $ ./t/fio-dedupe a.0.0 -C 1 -c 1 -b 4096
# Extents=25600, Unique extents=16817 Duplicated extents=5735
# De-dupe ratio: 1:0.52
# De-dupe working set at least: 22.40%
# Fio setting: dedupe_percentage=34
# Unique capacity 33MB
#
# $ ./t/fio-dedupe b.0.0 -C 1 -c 1 -b 4096
# Extents=25600, Unique extents=17009 Duplicated extents=5636
# De-dupe ratio: 1:0.51
# De-dupe working set at least: 22.02%
# Fio setting: dedupe_percentage=34
# Unique capacity 34MB
#
# Combining the two files:
# $ cat a.0.0 > c.0.0
# $ cat b.0.0 >> c.0.0
#
# Checking the data reduction ratio on the combined file:
# $ ./t/fio-dedupe c.0.0 -C 1 -c 1 -b 4096
# Extents=51200, Unique extents=25747 Duplicated extents=11028
# De-dupe ratio: 1:0.99
# De-dupe working set at least: 21.54%
# Fio setting: dedupe_percentage=50
# Unique capacity 51MB
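#
# The printed ratio appears to be (Extents - Unique extents) / Unique extents:
# a.0.0 alone:  (25600 - 16817) / 16817 ~= 0.52
# c.0.0 (both): (51200 - 25747) / 25747 ~= 0.99
# so the 50% global dedupe target only becomes visible once both files are
# scanned together; either file on its own looks closer to dedupe_percentage=34.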
dedupe_mode=working_set
buffer_compress_percentage=50
dedupe_working_set_percentage=50
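
# The three options above only shape the generated data buffers. A minimal
# sketch of the surrounding job file assumed by the comments at the top
# (job names a and b come from the a.0.0/b.0.0 filenames, size follows from
# 25600 x 4k extents per file, and the dedupe_global/dedupe_percentage values
# are assumptions rather than part of the measured output):
#
# [global]
# blocksize=4k
# size=100m
# rw=write
# dedupe_global=1
# dedupe_percentage=50
#
# [a]
# [b]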