/*
 * Small tool to check for dedupable blocks in a file or device. Basically
 * just scans the filename for extents of the given size, checksums them,
 * and if they match, checks the blocks in full for duplication.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <zlib.h>

#include "../fio_sem.h"
#include "../smalloc.h"
#include "../minmax.h"
#include "../crc/md5.h"
#include "../memalign.h"
#include "../flist.h"
#include "../gettime.h"
#include "../fio_time.h"
#include "../lib/rbtree.h"
#include "../os/os.h"
#include "../lib/bloom.h"
/* Per-thread zlib state used when -C (compression accounting) is enabled. */
struct zlib_ctrl {
	z_stream stream;
	unsigned char *buf_in;	/* blocksize-sized input scratch buffer */
	unsigned char *buf_out;	/* deflateBound-sized output scratch buffer */
};

/* One scanning worker; owns a chunk of the device at a time. */
struct worker_thread {
	struct zlib_ctrl zc;
	pthread_t thread;
	uint64_t cur_offset;		/* start of the chunk being worked on */
	uint64_t size;			/* size of that chunk */
	unsigned long long unique_capacity;
	unsigned long items;		/* blocks hashed */
	unsigned long dupes;		/* duplicate blocks found */
	int err;
	int fd;
	volatile int done;		/* set when the thread exits its loop */
};

/* One on-disk extent location, linked off a chunk when -d/-c is used. */
struct extent {
	struct flist_head list;
	uint64_t offset;
};

/* One unique block, keyed by MD5 hash, stored in the global rbtree. */
struct chunk {
	struct fio_rb_node rb_node;
	uint64_t count;			/* how many extents share this hash */
	uint32_t hash[MD5_HASH_WORDS];
	struct flist_head extent_list[0]; /* present only if -d/-c (see alloc_chunk) */
};

/* Hash + offset for one scanned block, produced by the workers. */
struct item {
	uint64_t offset;
	uint32_t hash[MD5_HASH_WORDS];
};
/* Tree of unique chunks, keyed by block hash; protected by rb_lock. */
static struct rb_root rb_root;
/* Optional probabilistic filter (-B); used instead of the rbtree when set. */
static struct bloom *bloom;
static struct fio_sem *rb_lock;

/* Tunables, settable from the command line. */
static unsigned int blocksize = 4096;
static unsigned int num_threads;
static unsigned int chunk_size = 1048576;	/* per-thread work unit */
static unsigned int dump_output;		/* -d: full extent/chunk dump */
static unsigned int odirect;			/* -o: open with O_DIRECT */
static unsigned int collision_check;		/* -c: byte-compare on hash match */
static unsigned int print_progress = 1;		/* -p: progress indicator */
static unsigned int use_bloom = 1;		/* -B: bloom filter mode */
static unsigned int compression = 0;		/* -C: compute compressible size */

/* Work distribution state; cur_offset advances under size_lock. */
static uint64_t total_size;
static uint64_t cur_offset;
static struct fio_sem *size_lock;

/* The file/device being scanned; shared read-only by all workers. */
static struct fio_file file;
85 static uint64_t get_size(struct fio_file *f, struct stat *sb)
89 if (S_ISBLK(sb->st_mode)) {
90 unsigned long long bytes = 0;
92 if (blockdev_size(f, &bytes)) {
93 log_err("dedupe: failed getting bdev size\n");
101 return (ret & ~((uint64_t)blocksize - 1));
104 static int get_work(uint64_t *offset, uint64_t *size)
109 fio_sem_down(size_lock);
111 if (cur_offset < total_size) {
112 *offset = cur_offset;
113 this_chunk = min((uint64_t)chunk_size, total_size - cur_offset);
115 cur_offset += this_chunk;
119 fio_sem_up(size_lock);
/*
 * Read @count bytes at @offset into @buf. Returns 0 on success, 1 on any
 * error or short read (partial blocks are not worth hashing).
 */
static int __read_block(int fd, void *buf, off_t offset, size_t count)
{
	ssize_t ret;

	ret = pread(fd, buf, count, offset);
	if (ret < 0) {
		perror("pread");
		return 1;
	} else if (!ret) {
		return 1;
	} else if (ret != (ssize_t) count) {
		log_err("dedupe: short read on block\n");
		return 1;
	}

	return 0;
}
141 static int read_block(int fd, void *buf, off_t offset)
143 return __read_block(fd, buf, offset, blocksize);
/*
 * Deflate the unique block at @offset and add its compressed size to
 * *unique_capacity. Returns 0 on success, 1 on read or zlib error.
 *
 * Bug fix: the compressed length must be measured against the space we
 * actually gave zlib (deflateBound(), which is larger than blocksize),
 * not against blocksize — the old "blocksize - avail_out" undercounted
 * and could underflow for incompressible blocks.
 */
static int account_unique_capacity(uint64_t offset, uint64_t *unique_capacity,
				   struct zlib_ctrl *zc)
{
	z_stream *stream = &zc->stream;
	unsigned int compressed_len;
	unsigned long out_bound;
	int ret;

	if (read_block(file.fd, zc->buf_in, offset))
		return 1;

	out_bound = deflateBound(stream, blocksize);
	stream->next_in = zc->buf_in;
	stream->avail_in = blocksize;
	stream->avail_out = out_bound;
	stream->next_out = zc->buf_out;

	ret = deflate(stream, Z_FINISH);
	if (ret == Z_STREAM_ERROR)
		return 1;
	compressed_len = out_bound - stream->avail_out;

	if (dump_output)
		printf("offset 0x%lx compressed to %d blocksize %d ratio %.2f \n",
			(unsigned long) offset, compressed_len, blocksize,
			(float)compressed_len / (float)blocksize);

	*unique_capacity += compressed_len;
	deflateReset(stream);
	return 0;
}
176 static void add_item(struct chunk *c, struct item *i)
179 * Save some memory and don't add extent items, if we don't
182 if (dump_output || collision_check) {
185 e = malloc(sizeof(*e));
186 e->offset = i->offset;
187 flist_add_tail(&e->list, &c->extent_list[0]);
193 static int col_check(struct chunk *c, struct item *i)
199 cbuf = fio_memalign(blocksize, blocksize, false);
200 ibuf = fio_memalign(blocksize, blocksize, false);
202 e = flist_entry(c->extent_list[0].next, struct extent, list);
203 if (read_block(file.fd, cbuf, e->offset))
206 if (read_block(file.fd, ibuf, i->offset))
209 ret = memcmp(ibuf, cbuf, blocksize);
211 fio_memfree(cbuf, blocksize, false);
212 fio_memfree(ibuf, blocksize, false);
216 static struct chunk *alloc_chunk(void)
220 if (collision_check || dump_output) {
221 c = malloc(sizeof(struct chunk) + sizeof(struct flist_head));
222 INIT_FLIST_HEAD(&c->extent_list[0]);
224 c = malloc(sizeof(struct chunk));
230 static int insert_chunk(struct item *i, uint64_t *unique_capacity,
231 struct zlib_ctrl *zc)
233 struct fio_rb_node **p, *parent;
237 p = &rb_root.rb_node;
242 c = rb_entry(parent, struct chunk, rb_node);
243 diff = memcmp(i->hash, c->hash, sizeof(i->hash));
246 } else if (diff > 0) {
249 if (!collision_check)
253 ret = col_check(c, i);
254 fio_sem_down(rb_lock);
264 RB_CLEAR_NODE(&c->rb_node);
266 memcpy(c->hash, i->hash, sizeof(i->hash));
267 rb_link_node(&c->rb_node, parent, p);
268 rb_insert_color(&c->rb_node, &rb_root);
270 ret = account_unique_capacity(i->offset, unique_capacity, zc);
279 static int insert_chunks(struct item *items, unsigned int nitems,
280 uint64_t *ndupes, uint64_t *unique_capacity,
281 struct zlib_ctrl *zc)
285 fio_sem_down(rb_lock);
287 for (i = 0; i < nitems; i++) {
292 s = sizeof(items[i].hash) / sizeof(uint32_t);
293 r = bloom_set(bloom, items[i].hash, s);
296 ret = insert_chunk(&items[i], unique_capacity, zc);
306 static void crc_buf(void *buf, uint32_t *hash)
308 struct fio_md5_ctx ctx = { .hash = hash };
311 fio_md5_update(&ctx, buf, blocksize);
315 static unsigned int read_blocks(int fd, void *buf, off_t offset, size_t size)
317 if (__read_block(fd, buf, offset, size))
320 return size / blocksize;
323 static int do_work(struct worker_thread *thread, void *buf)
325 unsigned int nblocks, i;
329 uint64_t unique_capacity = 0;
333 offset = thread->cur_offset;
335 nblocks = read_blocks(thread->fd, buf, offset,
336 min(thread->size, (uint64_t) chunk_size));
340 items = malloc(sizeof(*items) * nblocks);
342 for (i = 0; i < nblocks; i++) {
343 void *thisptr = buf + (i * blocksize);
345 items[i].offset = offset;
346 crc_buf(thisptr, items[i].hash);
351 ret = insert_chunks(items, nitems, &ndupes, &unique_capacity, &thread->zc);
355 thread->items += nitems;
356 thread->dupes += ndupes;
357 thread->unique_capacity += unique_capacity;
/*
 * Set up this worker's private zlib stream and aligned scratch buffers.
 * The output buffer is sized by deflateBound() so a worst-case deflate of
 * one block always fits.
 */
static void thread_init_zlib_control(struct worker_thread *thread)
{
	size_t sz;

	z_stream *stream = &thread->zc.stream;
	stream->zalloc = Z_NULL;
	stream->zfree = Z_NULL;
	stream->opaque = Z_NULL;

	if (deflateInit(stream, Z_DEFAULT_COMPRESSION) != Z_OK)
		abort();

	thread->zc.buf_in = fio_memalign(blocksize, blocksize, false);
	sz = deflateBound(stream, blocksize);
	thread->zc.buf_out = fio_memalign(blocksize, sz, false);
}
381 static void *thread_fn(void *data)
383 struct worker_thread *thread = data;
386 buf = fio_memalign(blocksize, chunk_size, false);
387 thread_init_zlib_control(thread);
390 if (get_work(&thread->cur_offset, &thread->size)) {
394 if (do_work(thread, buf)) {
401 fio_memfree(buf, chunk_size, false);
405 static void show_progress(struct worker_thread *threads, unsigned long total)
407 unsigned long last_nitems = 0;
408 struct timespec last_tv;
410 fio_gettime(&last_tv, NULL);
412 while (print_progress) {
413 unsigned long this_items;
414 unsigned long nitems = 0;
420 for (i = 0; i < num_threads; i++) {
421 nitems += threads[i].items;
422 some_done = threads[i].done;
430 perc = (float) nitems / (float) total;
432 this_items = nitems - last_nitems;
433 this_items *= blocksize;
434 tdiff = mtime_since_now(&last_tv);
436 this_items = (this_items * 1000) / (tdiff * 1024);
437 printf("%3.2f%% done (%luKiB/sec)\r", perc, this_items);
438 last_nitems = nitems;
439 fio_gettime(&last_tv, NULL);
441 printf("%3.2f%% done\r", perc);
448 static int run_dedupe_threads(struct fio_file *f, uint64_t dev_size,
449 uint64_t *nextents, uint64_t *nchunks,
450 uint64_t *unique_capacity)
452 struct worker_thread *threads;
453 unsigned long nitems, total_items;
456 total_size = dev_size;
457 total_items = dev_size / blocksize;
459 size_lock = fio_sem_init(FIO_SEM_UNLOCKED);
461 threads = malloc(num_threads * sizeof(struct worker_thread));
462 for (i = 0; i < num_threads; i++) {
463 memset(&threads[i], 0, sizeof(struct worker_thread));
464 threads[i].fd = f->fd;
466 err = pthread_create(&threads[i].thread, NULL, thread_fn, &threads[i]);
468 log_err("fio: thread startup failed\n");
473 show_progress(threads, total_items);
478 *unique_capacity = 0;
479 for (i = 0; i < num_threads; i++) {
481 pthread_join(threads[i].thread, &ret);
482 nitems += threads[i].items;
483 *nchunks += threads[i].dupes;
484 *unique_capacity += threads[i].unique_capacity;
487 printf("Threads(%u): %lu items processed\n", num_threads, nitems);
490 *nchunks = nitems - *nchunks;
492 fio_sem_remove(size_lock);
497 static int dedupe_check(const char *filename, uint64_t *nextents,
498 uint64_t *nchunks, uint64_t *unique_capacity)
506 flags |= OS_O_DIRECT;
508 memset(&file, 0, sizeof(file));
509 file.file_name = strdup(filename);
511 file.fd = open(filename, flags);
517 if (fstat(file.fd, &sb) < 0) {
522 dev_size = get_size(&file, &sb);
527 uint64_t bloom_entries;
529 bloom_entries = 8 * (dev_size / blocksize);
530 bloom = bloom_new(bloom_entries);
533 printf("Will check <%s>, size <%llu>, using %u threads\n", filename,
534 (unsigned long long) dev_size, num_threads);
536 return run_dedupe_threads(&file, dev_size, nextents, nchunks,
541 free(file.file_name);
545 static void show_chunk(struct chunk *c)
547 struct flist_head *n;
550 printf("c hash %8x %8x %8x %8x, count %lu\n", c->hash[0], c->hash[1],
551 c->hash[2], c->hash[3], (unsigned long) c->count);
552 flist_for_each(n, &c->extent_list[0]) {
553 e = flist_entry(n, struct extent, list);
554 printf("\toffset %llu\n", (unsigned long long) e->offset);
static const char *capacity_unit[] = {"b","KB", "MB", "GB", "TB", "PB", "EB"};

/*
 * Scale @n down by powers of 1024 until it fits the largest suitable unit,
 * storing the unit string in *unit_out and returning the scaled value.
 * (A uint64_t can need at most the EB entry, so the table cannot overrun.)
 */
static uint64_t bytes_to_human_readable_unit(uint64_t n, const char **unit_out)
{
	uint8_t i = 0;

	while (n >= 1024) {
		i++;
		n /= 1024;
	}

	*unit_out = capacity_unit[i];
	return n;
}
573 static void show_stat(uint64_t nextents, uint64_t nchunks, uint64_t ndupextents,
574 uint64_t unique_capacity)
580 printf("Extents=%lu, Unique extents=%lu", (unsigned long) nextents,
581 (unsigned long) nchunks);
583 printf(" Duplicated extents=%lu", (unsigned long) ndupextents);
587 ratio = (double) nextents / (double) nchunks;
588 printf("De-dupe ratio: 1:%3.2f\n", ratio - 1.0);
590 printf("De-dupe ratio: 1:infinite\n");
594 printf("De-dupe working set at least: %3.2f%%\n",
595 100.0 * (double) ndupextents / (double) nextents);
598 perc = 1.00 - ((double) nchunks / (double) nextents);
600 printf("Fio setting: dedupe_percentage=%u\n", (int) (perc + 0.50));
604 uc_human = bytes_to_human_readable_unit(unique_capacity, &unit);
605 printf("Unique capacity %lu%s\n", (unsigned long) uc_human, unit);
609 static void iter_rb_tree(uint64_t *nextents, uint64_t *nchunks, uint64_t *ndupextents)
611 struct fio_rb_node *n;
612 *nchunks = *nextents = *ndupextents = 0;
614 n = rb_first(&rb_root);
621 c = rb_entry(n, struct chunk, rb_node);
623 *nextents += c->count;
624 *ndupextents += (c->count > 1);
629 } while ((n = rb_next(n)) != NULL);
/* Print command-line help to stderr and return non-zero for main(). */
static int usage(char *argv[])
{
	log_err("Check for dedupable blocks on a device/file\n\n");
	log_err("%s: [options] <device or file>\n", argv[0]);
	log_err("\t-b\tChunk size to use\n");
	log_err("\t-t\tNumber of threads to use\n");
	log_err("\t-d\tFull extent/chunk debug output\n");
	log_err("\t-o\tUse O_DIRECT\n");
	log_err("\t-c\tFull collision check\n");
	log_err("\t-B\tUse probabilistic bloom filter\n");
	log_err("\t-p\tPrint progress indicator\n");
	log_err("\t-C\tCalculate compressible size\n");
	return 1;
}
647 int main(int argc, char *argv[])
649 uint64_t nextents = 0, nchunks = 0, ndupextents = 0, unique_capacity;
655 while ((c = getopt(argc, argv, "b:t:d:o:c:p:B:C:")) != -1) {
658 blocksize = atoi(optarg);
661 num_threads = atoi(optarg);
664 dump_output = atoi(optarg);
667 odirect = atoi(optarg);
670 collision_check = atoi(optarg);
673 print_progress = atoi(optarg);
676 use_bloom = atoi(optarg);
679 compression = atoi(optarg);
687 if (collision_check || dump_output || compression)
691 num_threads = cpus_online();
699 rb_lock = fio_sem_init(FIO_SEM_UNLOCKED);
701 ret = dedupe_check(argv[optind], &nextents, &nchunks, &unique_capacity);
705 iter_rb_tree(&nextents, &nchunks, &ndupextents);
707 show_stat(nextents, nchunks, ndupextents, unique_capacity);
710 fio_sem_remove(rb_lock);