2 * Small tool to check for dedupable blocks in a file or device. Basically
3 * just scans the filename for extents of the given size, checksums them,
11 #include <sys/types.h>
13 #include <sys/ioctl.h>
18 #include "../lib/rbtree.h"
22 #include "../smalloc.h"
23 #include "../minmax.h"
24 #include "../crc/md5.h"
25 #include "../memalign.h"
27 #include "../gettime.h"
28 #include "../fio_time.h"
30 #include "../lib/bloom.h"
/* Globals/stubs required by the linked fio library objects (debug support). */
33 struct timeval *fio_tv = NULL;
34 unsigned int fio_debug = 0;
/* Debug-print hook expected by fio code; body not visible in this view
 * (presumably a no-op for this standalone tool -- TODO confirm). */
36 void __dprint(int type, const char *str, ...)
/* Per-worker-thread state; most members are not visible in this view. */
40 struct worker_thread {
/* NOTE(review): the members below appear to belong to several different
 * structs (likely extent, chunk and item); the intervening declarations
 * are missing from this view -- do not assume they share one struct. */
55 struct flist_head list;
60 struct rb_node rb_node;
62 uint32_t hash[MD5_HASH_WORDS];
/* Zero-length trailing array (pre-C99 flexible-array idiom): optional
 * per-chunk list head for duplicate extents, only allocated when extents
 * are tracked (see alloc_chunk). */
63 struct flist_head extent_list[0];
68 uint32_t hash[MD5_HASH_WORDS];
71 static struct rb_root rb_root;		/* tree of unique chunks, keyed by md5 hash */
72 static struct bloom *bloom;		/* optional probabilistic pre-filter (-B) */
73 static struct fio_mutex *rb_lock;	/* protects rb_root across worker threads */
75 static unsigned int blocksize = 4096;	/* dedupe block size (-b); masking in get_size suggests power of two -- TODO confirm */
76 static unsigned int num_threads;	/* worker count (-t); defaults to cpus_online() in main */
77 static unsigned int chunk_size = 1048576;	/* bytes handed to a worker per get_work() call */
78 static unsigned int dump_output;	/* -d: dump per-chunk extent lists */
79 static unsigned int odirect;		/* -o: open the target with O_DIRECT */
80 static unsigned int collision_check;	/* -c: memcmp-verify hash matches */
81 static unsigned int print_progress = 1;	/* -p: show progress line */
82 static unsigned int use_bloom = 1;	/* -B: use bloom filter before rbtree */
84 static uint64_t total_size;		/* total bytes to scan */
85 static uint64_t cur_offset;		/* next unassigned offset; guarded by size_lock */
86 static struct fio_mutex *size_lock;	/* protects cur_offset/total_size hand-out */
/* Return the scannable size of fd in bytes, rounded down to a blocksize
 * multiple. Block devices are sized via the BLKGETSIZE64 ioctl; the
 * regular-file path (presumably sb->st_size) is not visible in this view. */
90 static uint64_t get_size(int fd, struct stat *sb)
94 if (S_ISBLK(sb->st_mode)) {
95 if (ioctl(fd, BLKGETSIZE64, &ret) < 0) {
/* Mask off a partial trailing block; assumes blocksize is a power of two. */
102 return (ret & ~((uint64_t)blocksize - 1));
/* Hand out the next chunk of the scan range to a worker.
 * On success fills *offset/*size; return convention (0 = got work,
 * non-zero = done, presumably) -- callers treat non-zero as stop.
 * cur_offset is advanced under size_lock so workers never overlap. */
105 static int get_work(uint64_t *offset, uint64_t *size)
110 fio_mutex_down(size_lock);
112 if (cur_offset < total_size) {
113 *offset = cur_offset;
/* Last chunk may be short: clamp to the remaining bytes. */
114 this_chunk = min((uint64_t)chunk_size, total_size - cur_offset);
116 cur_offset += this_chunk;
120 fio_mutex_up(size_lock);
/* Read exactly one blocksize-sized block at offset into buf.
 * Returns 0 on success, non-zero on error; a short read is treated
 * as a hard error (no retry loop visible in this view). */
124 static int read_block(int fd, void *buf, off_t offset)
128 ret = pread(fd, buf, blocksize, offset);
134 else if (ret != blocksize) {
135 log_err("dedupe: short read on block\n");
/* Record item i's offset as an extent on chunk c -- but only when the
 * extent list will actually be consumed (-d dump or -c collision check),
 * to save memory on plain scans. */
142 static void add_item(struct chunk *c, struct item *i)
145 * Save some memory and don't add extent items, if we don't
148 if (dump_output || collision_check) {
/* NOTE(review): malloc return is not checked before use. */
151 e = malloc(sizeof(*e));
152 e->offset = i->offset;
153 flist_add_tail(&e->list, &c->extent_list[0]);
/* Full collision check: re-read the chunk's first recorded extent and the
 * candidate item from the device and memcmp their contents, so an md5
 * match is confirmed byte-for-byte. Returns the memcmp result
 * (0 = true duplicate); error-path returns are not visible in this view. */
159 static int col_check(struct chunk *c, struct item *i)
/* Buffers are aligned to blocksize for the O_DIRECT case. */
165 cbuf = fio_memalign(blocksize, blocksize);
166 ibuf = fio_memalign(blocksize, blocksize);
/* First extent on the chunk is the representative copy to compare against. */
168 e = flist_entry(c->extent_list[0].next, struct extent, list);
169 if (read_block(dev_fd, cbuf, e->offset))
172 if (read_block(dev_fd, ibuf, i->offset))
175 ret = memcmp(ibuf, cbuf, blocksize);
177 fio_memfree(cbuf, blocksize);
178 fio_memfree(ibuf, blocksize);
/* Allocate a chunk node. The trailing extent_list[0] head is only given
 * storage (and initialized) when extents will be tracked; otherwise the
 * smaller bare struct is allocated.
 * NOTE(review): malloc returns are not checked in the visible lines. */
182 static struct chunk *alloc_chunk(void)
186 if (collision_check || dump_output) {
187 c = malloc(sizeof(struct chunk) + sizeof(struct flist_head));
188 INIT_FLIST_HEAD(&c->extent_list[0]);
190 c = malloc(sizeof(struct chunk));
/* Insert item i into the rbtree of unique chunks, keyed by its md5 hash.
 * On a hash match, either count it as a duplicate immediately or (with -c)
 * verify via col_check first. Called with rb_lock held (see insert_chunks). */
195 static void insert_chunk(struct item *i)
197 struct rb_node **p, *parent;
/* Standard rbtree descent to find the insertion point / existing match. */
201 p = &rb_root.rb_node;
206 c = rb_entry(parent, struct chunk, rb_node);
207 diff = memcmp(i->hash, c->hash, sizeof(i->hash));
215 if (!collision_check)
/* col_check does device I/O; drop rb_lock around it so other workers
 * can keep inserting, then re-take it before touching the tree again. */
218 fio_mutex_up(rb_lock);
219 ret = col_check(c, i);
220 fio_mutex_down(rb_lock);
/* No match found: link a fresh chunk at the located position. */
230 RB_CLEAR_NODE(&c->rb_node);
232 memcpy(c->hash, i->hash, sizeof(i->hash));
233 rb_link_node(&c->rb_node, parent, p);
234 rb_insert_color(&c->rb_node, &rb_root);
/* Batch-insert a worker's hashed items under rb_lock. With -B the bloom
 * filter is consulted first (bloom_set returns whether the hash was
 * possibly seen before -- presumably used to count dupes without touching
 * the tree; intervening lines not visible). Otherwise fall through to
 * the exact rbtree insert. */
239 static void insert_chunks(struct item *items, unsigned int nitems,
244 fio_mutex_down(rb_lock);
246 for (i = 0; i < nitems; i++) {
/* Number of 32-bit words in the md5 hash, as bloom_set expects. */
251 s = sizeof(items[i].hash) / sizeof(uint32_t);
252 r = bloom_set(bloom, items[i].hash, s);
255 insert_chunk(&items[i]);
258 fio_mutex_up(rb_lock);
/* md5 one block of data into hash (MD5_HASH_WORDS * 32 bits). */
261 static void crc_buf(void *buf, uint32_t *hash)
263 struct fio_md5_ctx ctx = { .hash = hash };
266 fio_md5_update(&ctx, buf, blocksize);
/* Process one assigned chunk: read each block, md5 it into a local items[]
 * array, then merge the whole batch into the shared structures via
 * insert_chunks(). Updates per-thread item/dupe counters. Returns 0/err. */
270 static int do_work(struct worker_thread *thread, void *buf)
272 unsigned int nblocks, i;
274 int err = 0, nitems = 0;
278 nblocks = thread->size / blocksize;
279 offset = thread->cur_offset;
/* NOTE(review): malloc return not checked in the visible lines. */
280 items = malloc(sizeof(*items) * nblocks);
282 for (i = 0; i < nblocks; i++) {
283 if (read_block(thread->fd, buf, offset))
286 items[i].offset = offset;
287 crc_buf(buf, items[i].hash);
292 insert_chunks(items, nitems, &ndupes);
295 thread->items += nitems;
296 thread->dupes += ndupes;
/* Worker thread entry: repeatedly grab a chunk via get_work() and hash it
 * with do_work() until the range is exhausted or an error occurs. The
 * single aligned block buffer is reused for every read. */
300 static void *thread_fn(void *data)
302 struct worker_thread *thread = data;
305 buf = fio_memalign(blocksize, blocksize);
308 if (get_work(&thread->cur_offset, &thread->size)) {
312 if (do_work(thread, buf)) {
319 fio_memfree(buf, blocksize);
/* Poll worker counters and print a one-line progress/throughput indicator
 * until all threads are done. Counters are read without locking -- this is
 * best-effort display only. */
323 static void show_progress(struct worker_thread *threads, unsigned long total)
325 unsigned long last_nitems = 0;
326 struct timeval last_tv;
328 fio_gettime(&last_tv, NULL);
330 while (print_progress) {
331 unsigned long this_items;
332 unsigned long nitems = 0;
338 for (i = 0; i < num_threads; i++) {
339 nitems += threads[i].items;
340 some_done = threads[i].done;
/* Fraction complete; presumably scaled to percent on a line not visible
 * here (printed with a %% format below) -- TODO confirm. */
348 perc = (float) nitems / (float) total;
/* Blocks since last sample, converted to bytes; KB/sec scaling by tdiff
 * happens on lines not visible in this view. */
350 this_items = nitems - last_nitems;
351 this_items *= blocksize;
352 tdiff = mtime_since_now(&last_tv);
355 printf("%3.2f%% done (%luKB/sec)\r", perc, this_items);
356 last_nitems = nitems;
357 fio_gettime(&last_tv, NULL);
359 printf("%3.2f%% done\r", perc);
/* Spawn num_threads workers over the device range, show progress, join
 * them, and aggregate item/dupe counts into *nextents / *nchunks. */
365 static int run_dedupe_threads(int fd, uint64_t dev_size, uint64_t *nextents,
368 struct worker_thread *threads;
369 unsigned long nitems, total_items;
372 total_size = dev_size;
373 total_items = dev_size / blocksize;
375 size_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
/* NOTE(review): malloc return not checked in the visible lines. */
377 threads = malloc(num_threads * sizeof(struct worker_thread));
378 for (i = 0; i < num_threads; i++) {
380 threads[i].items = 0;
384 err = pthread_create(&threads[i].thread, NULL, thread_fn, &threads[i]);
386 log_err("fio: thread startup failed\n");
/* Blocks until workers finish (loop exits when all report done). */
391 show_progress(threads, total_items);
396 for (i = 0; i < num_threads; i++) {
398 pthread_join(threads[i].thread, &ret);
399 nitems += threads[i].items;
400 *nchunks += threads[i].dupes;
403 printf("Threads(%u): %lu items processed\n", num_threads, nitems);
/* Convert dupe count to unique-chunk count: unique = total - dupes. */
406 *nchunks = nitems - *nchunks;
408 fio_mutex_remove(size_lock);
/* Open the target file/device, size it, optionally build the bloom filter,
 * and kick off the threaded scan. */
413 static int dedupe_check(const char *filename, uint64_t *nextents,
/* flags presumably includes O_DIRECT when -o was given (set on lines not
 * visible here). */
424 dev_fd = open(filename, flags);
430 if (fstat(dev_fd, &sb) < 0) {
436 dev_size = get_size(dev_fd, &sb);
443 uint64_t bloom_entries;
/* Size the filter at 1.5x the expected block count to keep the
 * false-positive rate down. */
445 bloom_entries = (3 * dev_size ) / (blocksize * 2);
446 bloom = bloom_new(bloom_entries);
449 printf("Will check <%s>, size <%llu>, using %u threads\n", filename, (unsigned long long) dev_size, num_threads);
451 return run_dedupe_threads(dev_fd, dev_size, nextents, nchunks);
/* Dump one chunk's md5 hash, occurrence count, and every recorded extent
 * offset (used by the -d debug output path). */
454 static void show_chunk(struct chunk *c)
456 struct flist_head *n;
459 printf("c hash %8x %8x %8x %8x, count %lu\n", c->hash[0], c->hash[1], c->hash[2], c->hash[3], (unsigned long) c->count);
460 flist_for_each(n, &c->extent_list[0]) {
461 e = flist_entry(n, struct extent, list);
462 printf("\toffset %llu\n", (unsigned long long) e->offset);
/* Print summary: total vs unique extents, dedupe factor, and the
 * equivalent fio dedupe_percentage job setting. */
466 static void show_stat(uint64_t nextents, uint64_t nchunks)
470 printf("Extents=%lu, Unique extents=%lu\n", (unsigned long) nextents, (unsigned long) nchunks);
/* NOTE(review): division by nchunks -- nchunks == 0 (empty input) would
 * divide by zero; guard not visible in this view. */
471 printf("De-dupe factor: %3.2f\n", (double) nextents / (double) nchunks);
/* Fraction of extents that are duplicates; presumably scaled to percent
 * on a line not visible here before the rounded print below. */
473 perc = 1.00 - ((double) nchunks / (double) nextents);
475 printf("Fio setting: dedupe_percentage=%u\n", (int) (perc + 0.50));
/* Walk the rbtree once after the scan, totalling extents and unique
 * chunks (and presumably calling show_chunk when -d is set, on lines not
 * visible here). */
479 static void iter_rb_tree(uint64_t *nextents, uint64_t *nchunks)
483 *nchunks = *nextents = 0;
485 n = rb_first(&rb_root);
492 c = rb_entry(n, struct chunk, rb_node);
494 *nextents += c->count;
499 } while ((n = rb_next(n)) != NULL);
/* Print command-line help to stderr (log_err); return value consumed by
 * main as the exit status. */
502 static int usage(char *argv[])
504 log_err("Check for dedupable blocks on a device/file\n\n");
505 log_err("%s: [options] <device or file>\n", argv[0]);
506 log_err("\t-b\tChunk size to use\n");
507 log_err("\t-t\tNumber of threads to use\n");
508 log_err("\t-d\tFull extent/chunk debug output\n");
509 log_err("\t-o\tUse O_DIRECT\n");
510 log_err("\t-c\tFull collision check\n");
511 log_err("\t-B\tUse probabilistic bloom filter\n");
512 log_err("\t-p\tPrint progress indicator\n");
/* Parse options, run the scan, then compute and print the dedupe stats.
 * Note every option takes an argument (getopt string uses ':' for all),
 * so flags are enabled as e.g. "-c 1". */
516 int main(int argc, char *argv[])
518 uint64_t nextents, nchunks;
521 while ((c = getopt(argc, argv, "b:t:d:o:c:p:B:")) != -1) {
524 blocksize = atoi(optarg);
527 num_threads = atoi(optarg);
530 dump_output = atoi(optarg);
533 odirect = atoi(optarg);
536 collision_check = atoi(optarg);
539 print_progress = atoi(optarg);
542 use_bloom = atoi(optarg);
/* Exact extent tracking supersedes the probabilistic filter. */
550 if (collision_check || dump_output)
554 num_threads = cpus_online();
562 rb_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
564 ret = dedupe_check(argv[optind], &nextents, &nchunks);
567 iter_rb_tree(&nextents, &nchunks);
569 show_stat(nextents, nchunks);
571 fio_mutex_remove(rb_lock);