/*
 * Small tool to check for dedupable blocks in a file or device. Basically
 * just scans the filename for extents of the given size, checksums them,
 * and if a duplicate is found, checks the blocks fully to see if they
 * are identical.
 */
15 #include "../fio_sem.h"
16 #include "../smalloc.h"
17 #include "../minmax.h"
18 #include "../crc/md5.h"
19 #include "../lib/memalign.h"
21 #include "../gettime.h"
22 #include "../fio_time.h"
23 #include "../lib/rbtree.h"
25 #include "../lib/bloom.h"
/*
 * Per-worker-thread state.
 * NOTE(review): the field lists of the structures below are partially
 * elided in this view; the visible members appear to belong to several
 * different structs (worker_thread, extent, chunk, item) — confirm
 * against the full file before relying on these comments.
 */
struct worker_thread {
	/* NOTE(review): this list head looks like it belongs to a separate
	 * 'struct extent' (one on-disk occurrence of a chunk) — TODO confirm */
	struct flist_head list;
	/* 'struct chunk': one unique block, linked into the global rb-tree */
	struct fio_rb_node rb_node;
	uint32_t hash[MD5_HASH_WORDS];	/* MD5 digest of the block contents */
	/* trailing list head(s), only allocated when extents are tracked */
	struct flist_head extent_list[0];
	/* 'struct item': per-scanned-block record prior to insertion */
	uint32_t hash[MD5_HASH_WORDS];
/* Tree of unique chunks, keyed by memcmp() order of the MD5 hash */
static struct rb_root rb_root;
/* Optional probabilistic filter used instead of the rb-tree (-B) */
static struct bloom *bloom;
/* Serializes access to rb_root/bloom across worker threads */
static struct fio_sem *rb_lock;

/* Scan granularity in bytes (-b); the size mask in get_size() assumes
 * this is a power of two — TODO confirm enforced at option parse time */
static unsigned int blocksize = 4096;
static unsigned int num_threads;	/* -t; set from cpus_online() if 0 */
static unsigned int chunk_size = 1048576;	/* bytes read per work unit */
static unsigned int dump_output;	/* -d: dump extents/chunks at exit */
static unsigned int odirect;		/* -o: open with O_DIRECT */
static unsigned int collision_check;	/* -c: byte-compare on hash match */
static unsigned int print_progress = 1;	/* -p: progress indicator */
static unsigned int use_bloom = 1;	/* -B */

/* Work distribution state, guarded by size_lock */
static uint64_t total_size;
static uint64_t cur_offset;
static struct fio_sem *size_lock;

/* The file or device being scanned; opened in dedupe_check() */
static struct fio_file file;
78 static uint64_t get_size(struct fio_file *f, struct stat *sb)
82 if (S_ISBLK(sb->st_mode)) {
83 unsigned long long bytes = 0;
85 if (blockdev_size(f, &bytes)) {
86 log_err("dedupe: failed getting bdev size\n");
93 return (ret & ~((uint64_t)blocksize - 1));
96 static int get_work(uint64_t *offset, uint64_t *size)
101 fio_sem_down(size_lock);
103 if (cur_offset < total_size) {
104 *offset = cur_offset;
105 this_chunk = min((uint64_t)chunk_size, total_size - cur_offset);
107 cur_offset += this_chunk;
111 fio_sem_up(size_lock);
/*
 * Read @count bytes at @offset into @buf.
 * Returns 0 on a full read, non-zero on error, EOF, or a short read
 * (the tool only works on whole blocks).
 */
static int __read_block(int fd, void *buf, off_t offset, size_t count)
{
	ssize_t ret;

	ret = pread(fd, buf, count, offset);
	if (ret < 0) {
		perror("pread");
		return 1;
	} else if (!ret) {
		/* EOF: no more data at this offset */
		return 1;
	} else if (ret != count) {
		log_err("dedupe: short read on block\n");
		return 1;
	}

	return 0;
}
133 static int read_block(int fd, void *buf, off_t offset)
135 return __read_block(fd, buf, offset, blocksize);
138 static void add_item(struct chunk *c, struct item *i)
141 * Save some memory and don't add extent items, if we don't
144 if (dump_output || collision_check) {
147 e = malloc(sizeof(*e));
148 e->offset = i->offset;
149 flist_add_tail(&e->list, &c->extent_list[0]);
155 static int col_check(struct chunk *c, struct item *i)
161 cbuf = fio_memalign(blocksize, blocksize);
162 ibuf = fio_memalign(blocksize, blocksize);
164 e = flist_entry(c->extent_list[0].next, struct extent, list);
165 if (read_block(file.fd, cbuf, e->offset))
168 if (read_block(file.fd, ibuf, i->offset))
171 ret = memcmp(ibuf, cbuf, blocksize);
173 fio_memfree(cbuf, blocksize);
174 fio_memfree(ibuf, blocksize);
178 static struct chunk *alloc_chunk(void)
182 if (collision_check || dump_output) {
183 c = malloc(sizeof(struct chunk) + sizeof(struct flist_head));
184 INIT_FLIST_HEAD(&c->extent_list[0]);
186 c = malloc(sizeof(struct chunk));
/*
 * Insert one scanned block's hash into the rb-tree of unique chunks,
 * or account it against an existing chunk on a hash match.
 * Called with rb_lock held (see insert_chunks()).
 * NOTE(review): several lines of this function are elided in this view
 * (tree descent branches, collision handling, allocation); the comments
 * below describe only the visible code.
 */
static void insert_chunk(struct item *i)
	struct fio_rb_node **p, *parent;

	/* descend from the root, ordering nodes by memcmp() of the hash */
	p = &rb_root.rb_node;
		c = rb_entry(parent, struct chunk, rb_node);
		diff = memcmp(i->hash, c->hash, sizeof(i->hash));
			/* hashes match: without -c, treat it as a dupe outright */
			if (!collision_check)
			/* full byte-compare of the two blocks; rb_lock is
			 * presumably dropped around the I/O, hence the
			 * re-acquire below — TODO confirm elided fio_sem_up */
			ret = col_check(c, i);
			fio_sem_down(rb_lock);
	/* no matching node: allocate a fresh chunk and link it in */
	RB_CLEAR_NODE(&c->rb_node);
	memcpy(c->hash, i->hash, sizeof(i->hash));
	rb_link_node(&c->rb_node, parent, p);
	rb_insert_color(&c->rb_node, &rb_root);
/*
 * Batch-insert a worker's scanned items under rb_lock. With the bloom
 * filter enabled, duplicates are only counted probabilistically via
 * bloom_set(); otherwise each item goes into the exact rb-tree.
 * NOTE(review): the tail of the parameter list and the branch structure
 * are elided in this view — the bloom path and the insert_chunk() path
 * are presumably mutually exclusive; TODO confirm.
 */
static void insert_chunks(struct item *items, unsigned int nitems,
	fio_sem_down(rb_lock);

	for (i = 0; i < nitems; i++) {
		/* s = hash length in 32-bit words, as bloom_set() expects */
		s = sizeof(items[i].hash) / sizeof(uint32_t);
		r = bloom_set(bloom, items[i].hash, s);
		insert_chunk(&items[i]);
257 static void crc_buf(void *buf, uint32_t *hash)
259 struct fio_md5_ctx ctx = { .hash = hash };
262 fio_md5_update(&ctx, buf, blocksize);
266 static unsigned int read_blocks(int fd, void *buf, off_t offset, size_t size)
268 if (__read_block(fd, buf, offset, size))
271 return size / blocksize;
/*
 * Process one work unit: read up to chunk_size bytes at the thread's
 * current offset, MD5 each blocksize-sized block into an item array,
 * then batch-insert the items and accumulate per-thread totals.
 * NOTE(review): several lines are elided in this view (declarations,
 * the error return after read_blocks(), the per-iteration offset
 * advance, and freeing 'items') — comments cover visible code only.
 */
static int do_work(struct worker_thread *thread, void *buf)
	unsigned int nblocks, i;

	offset = thread->cur_offset;

	/* the last unit may be short: clamp the read to thread->size */
	nblocks = read_blocks(thread->fd, buf, offset, min(thread->size, (uint64_t)chunk_size));

	/* one (offset, hash) record per block read */
	items = malloc(sizeof(*items) * nblocks);

	for (i = 0; i < nblocks; i++) {
		void *thisptr = buf + (i * blocksize);

		/* presumably 'offset' advances by blocksize per iteration
		 * in an elided line — TODO confirm */
		items[i].offset = offset;
		crc_buf(thisptr, items[i].hash);

	insert_chunks(items, nitems, &ndupes);

	/* roll results into this worker's totals for final reporting */
	thread->items += nitems;
	thread->dupes += ndupes;
/*
 * Worker thread entry point: repeatedly grab a work unit via get_work()
 * and hash it via do_work() until the file is exhausted or an error
 * occurs, using one chunk_size-sized aligned buffer for all reads.
 * NOTE(review): the surrounding loop, error flagging, done marking and
 * return statement are elided in this view.
 */
static void *thread_fn(void *data)
	struct worker_thread *thread = data;

	buf = fio_memalign(blocksize, chunk_size);

	/* non-zero from get_work() means no work left */
	if (get_work(&thread->cur_offset, &thread->size)) {
	/* non-zero from do_work() means a read failed */
	if (do_work(thread, buf)) {

	fio_memfree(buf, chunk_size);
/*
 * Poll the workers and print a progress line (percent done, and KiB/sec
 * when a time delta is available) until progress display is disabled or
 * a worker finishes.
 * NOTE(review): the loop-exit test on 'some_done', the tdiff!=0 guard,
 * the polling sleep and some declarations are elided in this view.
 */
static void show_progress(struct worker_thread *threads, unsigned long total)
	unsigned long last_nitems = 0;
	struct timespec last_tv;

	fio_gettime(&last_tv, NULL);

	while (print_progress) {
		unsigned long this_items;
		unsigned long nitems = 0;

		/* sum blocks processed so far; note if any thread is done */
		for (i = 0; i < num_threads; i++) {
			nitems += threads[i].items;
			some_done = threads[i].done;

		/* fraction of total blocks done (presumably scaled to a
		 * percentage in an elided line — TODO confirm) */
		perc = (float) nitems / (float) total;

		/* blocks since last poll, converted to bytes... */
		this_items = nitems - last_nitems;
		this_items *= blocksize;
		tdiff = mtime_since_now(&last_tv);
		/* ...and then to KiB/sec over the elapsed milliseconds */
		this_items = (this_items * 1000) / (tdiff * 1024);
		printf("%3.2f%% done (%luKiB/sec)\r", perc, this_items);
		last_nitems = nitems;
		fio_gettime(&last_tv, NULL);
		/* fallback when no time has elapsed: rate omitted */
		printf("%3.2f%% done\r", perc);
/*
 * Spawn num_threads workers over file @f of @dev_size bytes, show
 * progress, then join the workers and aggregate their results.
 * On return, *nchunks holds the number of unique extents (total items
 * minus dupes); *nextents is presumably set elsewhere/elided.
 * NOTE(review): error handling after pthread_create, the initialization
 * of 'nitems', and the cleanup/return are elided in this view.
 */
static int run_dedupe_threads(struct fio_file *f, uint64_t dev_size,
			      uint64_t *nextents, uint64_t *nchunks)
	struct worker_thread *threads;
	unsigned long nitems, total_items;

	/* publish global work-distribution state for get_work() */
	total_size = dev_size;
	total_items = dev_size / blocksize;

	size_lock = fio_sem_init(FIO_SEM_UNLOCKED);
	threads = malloc(num_threads * sizeof(struct worker_thread));

	for (i = 0; i < num_threads; i++) {
		memset(&threads[i], 0, sizeof(struct worker_thread));
		threads[i].fd = f->fd;

		err = pthread_create(&threads[i].thread, NULL, thread_fn, &threads[i]);
			log_err("fio: thread startup failed\n");

	show_progress(threads, total_items);

	for (i = 0; i < num_threads; i++) {
		pthread_join(threads[i].thread, &ret);
		nitems += threads[i].items;
		*nchunks += threads[i].dupes;

	printf("Threads(%u): %lu items processed\n", num_threads, nitems);

	/* unique chunks = total items minus detected dupes */
	*nchunks = nitems - *nchunks;

	fio_sem_remove(size_lock);
/*
 * Open @filename, determine its scannable size, optionally size and
 * allocate the bloom filter, and run the dedupe scan.
 * NOTE(review): the tail of the parameter list, the open/fstat error
 * paths, and the condition guarding bloom allocation are elided in
 * this view.
 */
static int dedupe_check(const char *filename, uint64_t *nextents,
	/* -o: bypass the page cache */
	flags |= OS_O_DIRECT;

	memset(&file, 0, sizeof(file));
	file.file_name = strdup(filename);

	file.fd = open(filename, flags);

	if (fstat(file.fd, &sb) < 0) {

	dev_size = get_size(&file, &sb);

		uint64_t bloom_entries;

		/* 8 bits per scan block — sizes the filter to the device */
		bloom_entries = 8 * (dev_size / blocksize);
		bloom = bloom_new(bloom_entries);

	printf("Will check <%s>, size <%llu>, using %u threads\n", filename, (unsigned long long) dev_size, num_threads);

	return run_dedupe_threads(&file, dev_size, nextents, nchunks);

	/* error-path cleanup (label elided) */
	free(file.file_name);
464 static void show_chunk(struct chunk *c)
466 struct flist_head *n;
469 printf("c hash %8x %8x %8x %8x, count %lu\n", c->hash[0], c->hash[1], c->hash[2], c->hash[3], (unsigned long) c->count);
470 flist_for_each(n, &c->extent_list[0]) {
471 e = flist_entry(n, struct extent, list);
472 printf("\toffset %llu\n", (unsigned long long) e->offset);
/*
 * Print the final dedupe statistics: extent counts, the dedupe ratio,
 * and the equivalent fio dedupe_percentage setting (rounded to the
 * nearest integer percent).
 */
static void show_stat(uint64_t nextents, uint64_t nchunks)
{
	double perc, ratio;

	printf("Extents=%lu, Unique extents=%lu\n", (unsigned long) nextents, (unsigned long) nchunks);

	if (nchunks) {
		ratio = (double) nextents / (double) nchunks;
		printf("De-dupe ratio: 1:%3.2f\n", ratio - 1.0);
	} else
		printf("De-dupe ratio: 1:infinite\n");

	/* fraction of blocks that were duplicates, as a percentage */
	perc = 1.00 - ((double) nchunks / (double) nextents);
	perc *= 100.0;
	printf("Fio setting: dedupe_percentage=%u\n", (int) (perc + 0.50));
}
494 static void iter_rb_tree(uint64_t *nextents, uint64_t *nchunks)
496 struct fio_rb_node *n;
498 *nchunks = *nextents = 0;
500 n = rb_first(&rb_root);
507 c = rb_entry(n, struct chunk, rb_node);
509 *nextents += c->count;
514 } while ((n = rb_next(n)) != NULL);
/* Print usage to stderr; returns non-zero so main can 'return usage()'. */
static int usage(char *argv[])
{
	log_err("Check for dedupable blocks on a device/file\n\n");
	log_err("%s: [options] <device or file>\n", argv[0]);
	log_err("\t-b\tChunk size to use\n");
	log_err("\t-t\tNumber of threads to use\n");
	log_err("\t-d\tFull extent/chunk debug output\n");
	log_err("\t-o\tUse O_DIRECT\n");
	log_err("\t-c\tFull collision check\n");
	log_err("\t-B\tUse probabilistic bloom filter\n");
	log_err("\t-p\tPrint progress indicator\n");
	return 1;
}
/*
 * Entry point: parse options, force the exact rb-tree path when -c/-d
 * needs per-extent data, default the thread count to the online CPUs,
 * run the scan, and report the resulting statistics.
 * NOTE(review): per-case 'break's, argument validation, the bloom
 * disable under -c/-d, and error checks around dedupe_check() are
 * elided in this view.
 */
int main(int argc, char *argv[])
	uint64_t nextents = 0, nchunks = 0;

	while ((c = getopt(argc, argv, "b:t:d:o:c:p:B:")) != -1) {
		blocksize = atoi(optarg);
		num_threads = atoi(optarg);
		dump_output = atoi(optarg);
		odirect = atoi(optarg);
		collision_check = atoi(optarg);
		print_progress = atoi(optarg);
		use_bloom = atoi(optarg);

	/* -c/-d need exact extent lists, which the bloom filter cannot
	 * provide — presumably use_bloom is cleared here (line elided) */
	if (collision_check || dump_output)

	/* default worker count when -t was not given */
	num_threads = cpus_online();

	rb_lock = fio_sem_init(FIO_SEM_UNLOCKED);
	ret = dedupe_check(argv[optind], &nextents, &nchunks);

	/* without bloom, counts come from walking the exact rb-tree */
	iter_rb_tree(&nextents, &nchunks);

	show_stat(nextents, nchunks);

	fio_sem_remove(rb_lock);