/*
 * Small tool to check for dedupable blocks in a file or device. Basically
 * just scans the filename for extents of the given size, checksums them,
 * and tallies how many identical extents are found.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

#include "../lib/rbtree.h"
#include "../flist.h"
#include "../log.h"
#include "../mutex.h"
#include "../smalloc.h"
#include "../minmax.h"
#include "../crc/md5.h"
#include "../memalign.h"
#include "../os/os.h"
29 struct timeval *fio_tv = NULL;
30 unsigned int fio_debug = 0;
32 void __dprint(int type, const char *str, ...)
36 struct worker_thread {
50 struct flist_head list;
55 struct rb_node rb_node;
56 struct flist_head extent_list;
58 uint32_t hash[MD5_HASH_WORDS];
63 uint32_t hash[MD5_HASH_WORDS];
66 static struct rb_root rb_root;
67 static struct fio_mutex *rb_lock;
69 static unsigned int blocksize = 4096;
70 static unsigned int num_threads;
71 static unsigned int chunk_size = 1048576;
72 static unsigned int dump_output;
73 static unsigned int odirect;
74 static unsigned int collision_check;
75 static unsigned int print_progress = 1;
77 static uint64_t total_size;
78 static uint64_t cur_offset;
79 static struct fio_mutex *size_lock;
83 static uint64_t get_size(int fd, struct stat *sb)
87 if (S_ISBLK(sb->st_mode)) {
88 if (ioctl(fd, BLKGETSIZE64, &ret) < 0) {
95 return (ret & ~((uint64_t)blocksize - 1));
98 static int get_work(uint64_t *offset, uint64_t *size)
103 fio_mutex_down(size_lock);
105 if (cur_offset < total_size) {
106 *offset = cur_offset;
107 this_chunk = min((uint64_t)chunk_size, total_size - cur_offset);
109 cur_offset += this_chunk;
113 fio_mutex_up(size_lock);
117 static int read_block(int fd, void *buf, off_t offset)
121 ret = pread(fd, buf, blocksize, offset);
127 else if (ret != blocksize) {
128 log_err("dedupe: short read on block\n");
135 static void add_item(struct chunk *c, struct item *i)
139 e = malloc(sizeof(*e));
140 e->offset = i->offset;
141 flist_add_tail(&e->list, &c->extent_list);
145 static int col_check(struct chunk *c, struct item *i)
151 cbuf = fio_memalign(blocksize, blocksize);
152 ibuf = fio_memalign(blocksize, blocksize);
154 e = flist_entry(c->extent_list.next, struct extent, list);
155 if (read_block(dev_fd, cbuf, e->offset))
158 if (read_block(dev_fd, ibuf, i->offset))
161 ret = memcmp(ibuf, cbuf, blocksize);
163 fio_memfree(cbuf, blocksize);
164 fio_memfree(ibuf, blocksize);
168 static void insert_chunk(struct item *i)
170 struct rb_node **p, *parent;
174 p = &rb_root.rb_node;
179 c = rb_entry(parent, struct chunk, rb_node);
180 diff = memcmp(i->hash, c->hash, sizeof(i->hash));
188 if (!collision_check)
191 fio_mutex_up(rb_lock);
192 ret = col_check(c, i);
193 fio_mutex_down(rb_lock);
202 c = malloc(sizeof(*c));
203 RB_CLEAR_NODE(&c->rb_node);
204 INIT_FLIST_HEAD(&c->extent_list);
206 memcpy(c->hash, i->hash, sizeof(i->hash));
207 rb_link_node(&c->rb_node, parent, p);
208 rb_insert_color(&c->rb_node, &rb_root);
213 static void insert_chunks(struct item *items, unsigned int nitems)
217 fio_mutex_down(rb_lock);
219 for (i = 0; i < nitems; i++)
220 insert_chunk(&items[i]);
222 fio_mutex_up(rb_lock);
225 static void crc_buf(void *buf, uint32_t *hash)
227 struct fio_md5_ctx ctx = { .hash = hash };
230 fio_md5_update(&ctx, buf, blocksize);
234 static int do_work(struct worker_thread *thread, void *buf)
236 unsigned int nblocks, i;
238 int err = 0, nitems = 0;
241 nblocks = thread->size / blocksize;
242 offset = thread->cur_offset;
243 items = malloc(sizeof(*items) * nblocks);
245 for (i = 0; i < nblocks; i++) {
246 if (read_block(thread->fd, buf, offset))
248 items[i].offset = offset;
249 crc_buf(buf, items[i].hash);
254 insert_chunks(items, nitems);
255 thread->items += nitems;
260 static void *thread_fn(void *data)
262 struct worker_thread *thread = data;
265 buf = fio_memalign(blocksize, blocksize);
268 if (get_work(&thread->cur_offset, &thread->size)) {
272 if (do_work(thread, buf)) {
279 fio_memfree(buf, blocksize);
283 static int __dedupe_check(int fd, uint64_t dev_size)
285 struct worker_thread *threads;
286 unsigned long nitems, total_items;
289 total_size = dev_size;
290 total_items = dev_size / blocksize;
292 size_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
294 threads = malloc(num_threads * sizeof(struct worker_thread));
295 for (i = 0; i < num_threads; i++) {
297 threads[i].items = 0;
301 err = pthread_create(&threads[i].thread, NULL, thread_fn, &threads[i]);
303 log_err("fio: thread startup failed\n");
308 while (print_progress) {
313 for (i = 0; i < num_threads; i++) {
314 nitems += threads[i].items;
315 some_done = threads[i].done;
323 perc = (float) nitems / (float) total_items;
325 printf("%3.2f%% done\r", perc);
331 for (i = 0; i < num_threads; i++) {
333 pthread_join(threads[i].thread, &ret);
334 nitems += threads[i].items;
337 printf("Threads(%u): %lu items processed\n", num_threads, nitems);
339 fio_mutex_remove(size_lock);
343 static int dedupe_check(const char *filename)
353 dev_fd = open(filename, flags);
359 if (fstat(dev_fd, &sb) < 0) {
365 dev_size = get_size(dev_fd, &sb);
371 printf("Will check <%s>, size <%lu>\n", filename, dev_size);
373 return __dedupe_check(dev_fd, dev_size);
376 static void show_chunk(struct chunk *c)
378 struct flist_head *n;
381 printf("c hash %8x %8x %8x %8x, count %lu\n", c->hash[0], c->hash[1], c->hash[2], c->hash[3], c->count);
382 flist_for_each(n, &c->extent_list) {
383 e = flist_entry(n, struct extent, list);
384 printf("\toffset %lu\n", e->offset);
388 static void iter_rb_tree(void)
395 nchunks = nextents = 0;
397 n = rb_first(&rb_root);
404 c = rb_entry(n, struct chunk, rb_node);
406 nextents += c->count;
411 } while ((n = rb_next(n)) != NULL);
413 printf("Extents=%lu, Unique extents=%lu\n", nextents, nchunks);
414 printf("De-dupe factor: %3.2f\n", (double) nextents / (double) nchunks);
416 perc = 1.00 - ((double) nchunks / (double) nextents);
418 printf("Fio setting: dedupe_percentage=%u\n", (int) (perc + 0.50));
/*
 * Print usage to stderr and return 1 so callers can `return usage(argv);`.
 */
static int usage(char *argv[])
{
	log_err("Check for dedupable blocks on a device/file\n\n");
	log_err("%s: [options] <device or file>\n", argv[0]);
	log_err("\t-b\tChunk size to use\n");
	log_err("\t-t\tNumber of threads to use\n");
	log_err("\t-d\tFull extent/chunk debug output\n");
	log_err("\t-o\tUse O_DIRECT\n");
	log_err("\t-c\tFull collision check\n");
	log_err("\t-p\tPrint progress indicator\n");
	return 1;
}
434 int main(int argc, char *argv[])
438 while ((c = getopt(argc, argv, "b:t:d:o:c:p:")) != -1) {
441 blocksize = atoi(optarg);
444 num_threads = atoi(optarg);
447 dump_output = atoi(optarg);
450 odirect = atoi(optarg);
453 collision_check = atoi(optarg);
456 print_progress = atoi(optarg);
465 num_threads = cpus_online();
473 rb_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
475 ret = dedupe_check(argv[optind]);