summaryrefslogtreecommitdiff
path: root/dedupe.c
blob: fd116dfba4933396ee6a928dd87866d417cf67a1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
#include "fio.h"

/*
 * Pre-compute the PRNG seed states for the dedupe "working set".
 *
 * In DEDUPE_MODE_WORKING_SET, deduplicated writes must reproduce buffers
 * from a fixed pool of unique pages. Each entry in
 * td->dedupe_working_set_states snapshots the frand state used to generate
 * one unique page, so that page can be regenerated on demand later.
 *
 * Returns 0 on success (including when working-set mode is not in use),
 * 1 if the state array cannot be allocated.
 */
int init_dedupe_working_set_seeds(struct thread_data *td)
{
	unsigned long long i, j, num_seed_advancements;
	struct frand_state dedupe_working_set_state = {0};

	if (!td->o.dedupe_percentage || !(td->o.dedupe_mode == DEDUPE_MODE_WORKING_SET))
		return 0;

	/*
	 * With compression enabled, one write buffer consumes multiple seed
	 * advancements (one per compress chunk). Without compression
	 * (compress_chunk == 0), min_not_zero() makes this exactly 1.
	 */
	num_seed_advancements = td->o.min_bs[DDIR_WRITE] /
		min_not_zero(td->o.min_bs[DDIR_WRITE], (unsigned long long) td->o.compress_chunk);
	/*
	 * The dedupe working set keeps seeds of unique data (generated by buf_state).
	 * Dedupe-ed pages will be generated using those seeds.
	 */
	td->num_unique_pages = (td->o.size * (unsigned long long)td->o.dedupe_working_set_percentage / 100) / td->o.min_bs[DDIR_WRITE];

	/*
	 * A tiny job size and/or percentage can yield zero unique pages.
	 * Bail out before allocating: calloc(0, ...) may legally return
	 * NULL, which would be misreported below as an allocation failure.
	 */
	if (!td->num_unique_pages)
		return 0;

	/*
	 * calloc (rather than malloc with a hand-rolled multiply) so the
	 * count * size computation is overflow-checked; every element is
	 * overwritten by frand_copy() below, so the zero-fill is harmless.
	 */
	td->dedupe_working_set_states = calloc(td->num_unique_pages, sizeof(struct frand_state));
	if (!td->dedupe_working_set_states) {
		log_err("fio: could not allocate dedupe working set\n");
		return 1;
	}

	/* Start from the job's buffer PRNG state so runs are reproducible. */
	frand_copy(&dedupe_working_set_state, &td->buf_state);
	for (i = 0; i < td->num_unique_pages; i++) {
		frand_copy(&td->dedupe_working_set_states[i], &dedupe_working_set_state);
		/*
		 * When compression is used the seed is advanced multiple times to
		 * generate the buffer. We want to regenerate the same buffer when
		 * deduping against this page
		 */
		for (j = 0; j < num_seed_advancements; j++)
			__get_next_seed(&dedupe_working_set_state);
	}

	return 0;
}