dedupe: default to using a bloom filter to save memory
[fio.git] / t / dedupe.c
/*
 * Small tool to check for dedupable blocks in a file or device. Basically
 * just scans the filename for extents of the given size, checksums them,
 * and orders them up.
 */
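/*
 * Example invocation (the binary name depends on how this file is built;
 * options are described in usage() below):
 *
 *   ./dedupe -b 4096 -t 4 /dev/sdX
 *
 * -B 1 (the default) tracks seen hashes with a bloom filter; -c 1 or
 * -d 1 disables it and falls back to the exact rb-tree based tracking.
 */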
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <string.h>

#include "../lib/rbtree.h"
#include "../flist.h"
#include "../log.h"
#include "../mutex.h"
#include "../smalloc.h"
#include "../minmax.h"
#include "../crc/md5.h"
#include "../memalign.h"
#include "../os/os.h"
#include "../gettime.h"
#include "../fio_time.h"

#include "../lib/bloom.h"

FILE *f_err;
struct timeval *fio_tv = NULL;
unsigned int fio_debug = 0;

void __dprint(int type, const char *str, ...)
{
}

struct worker_thread {
        pthread_t thread;

        volatile int done;

        int fd;
        uint64_t cur_offset;
        uint64_t size;

        unsigned long items;
        unsigned long dupes;
        int err;
};

struct extent {
        struct flist_head list;
        uint64_t offset;
};

struct chunk {
        struct rb_node rb_node;
        uint64_t count;
        uint32_t hash[MD5_HASH_WORDS];
        struct flist_head extent_list[0];
};

struct item {
        uint64_t offset;
        uint32_t hash[MD5_HASH_WORDS];
};

static struct rb_root rb_root;
static struct bloom *bloom;
static struct fio_mutex *rb_lock;

static unsigned int blocksize = 4096;
static unsigned int num_threads;
static unsigned int chunk_size = 1048576;
static unsigned int dump_output;
static unsigned int odirect;
static unsigned int collision_check;
static unsigned int print_progress = 1;
static unsigned int use_bloom = 1;

static uint64_t total_size;
static uint64_t cur_offset;
static struct fio_mutex *size_lock;

static int dev_fd;

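/*
 * Return the size of the device or file, rounded down to a multiple of
 * the block size. Block devices are sized via BLKGETSIZE64, regular
 * files via their stat size.
 */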
static uint64_t get_size(int fd, struct stat *sb)
{
        uint64_t ret;

        if (S_ISBLK(sb->st_mode)) {
                if (ioctl(fd, BLKGETSIZE64, &ret) < 0) {
                        perror("ioctl");
                        return 0;
                }
        } else
                ret = sb->st_size;

        return (ret & ~((uint64_t)blocksize - 1));
}

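/*
 * Hand out the next chunk of work (at most chunk_size bytes) under
 * size_lock. Returns non-zero when the whole device has been parceled
 * out and the calling worker should exit.
 */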
static int get_work(uint64_t *offset, uint64_t *size)
{
        uint64_t this_chunk;
        int ret = 1;

        fio_mutex_down(size_lock);

        if (cur_offset < total_size) {
                *offset = cur_offset;
                this_chunk = min((uint64_t)chunk_size, total_size - cur_offset);
                *size = this_chunk;
                cur_offset += this_chunk;
                ret = 0;
        }

        fio_mutex_up(size_lock);
        return ret;
}

static int read_block(int fd, void *buf, off_t offset)
{
        ssize_t ret;

        ret = pread(fd, buf, blocksize, offset);
        if (ret < 0) {
                perror("pread");
                return 1;
        } else if (!ret)
                return 1;
        else if (ret != blocksize) {
                log_err("dedupe: short read on block\n");
                return 1;
        }

        return 0;
}

static void add_item(struct chunk *c, struct item *i)
{
        /*
         * Save some memory and don't add extent items if we don't
         * use them.
         */
        if (dump_output || collision_check) {
                struct extent *e;

                e = malloc(sizeof(*e));
                e->offset = i->offset;
                flist_add_tail(&e->list, &c->extent_list[0]);
        }

        c->count++;
}

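/*
 * Full collision check: a matching MD5 hash is only counted as a dupe
 * if the block contents compare equal to the first extent stored for
 * the chunk. Returns 0 if the blocks are identical.
 */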
static int col_check(struct chunk *c, struct item *i)
{
        struct extent *e;
        char *cbuf, *ibuf;
        int ret = 1;

        cbuf = fio_memalign(blocksize, blocksize);
        ibuf = fio_memalign(blocksize, blocksize);

        e = flist_entry(c->extent_list[0].next, struct extent, list);
        if (read_block(dev_fd, cbuf, e->offset))
                goto out;

        if (read_block(dev_fd, ibuf, i->offset))
                goto out;

        ret = memcmp(ibuf, cbuf, blocksize);
out:
        fio_memfree(cbuf, blocksize);
        fio_memfree(ibuf, blocksize);
        return ret;
}

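/*
 * Chunks only carry an extent list when -c or -d is given; otherwise
 * allocate the smaller structure without the list head to save memory.
 */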
static struct chunk *alloc_chunk(void)
{
        struct chunk *c;

        if (collision_check || dump_output) {
                c = malloc(sizeof(struct chunk) + sizeof(struct flist_head));
                INIT_FLIST_HEAD(&c->extent_list[0]);
        } else
                c = malloc(sizeof(struct chunk));

        return c;
}

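/*
 * Insert an item into the rb-tree of chunks, keyed by MD5 hash. On a
 * hash match we either count it as a duplicate directly, or (with -c)
 * drop rb_lock and verify the block contents first; a hash collision
 * with differing data is inserted to the right as a separate chunk.
 */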
static void insert_chunk(struct item *i)
{
        struct rb_node **p, *parent;
        struct chunk *c;
        int diff;

        p = &rb_root.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                c = rb_entry(parent, struct chunk, rb_node);
                diff = memcmp(i->hash, c->hash, sizeof(i->hash));
                if (diff < 0)
                        p = &(*p)->rb_left;
                else if (diff > 0)
                        p = &(*p)->rb_right;
                else {
                        int ret;

                        if (!collision_check)
                                goto add;

                        fio_mutex_up(rb_lock);
                        ret = col_check(c, i);
                        fio_mutex_down(rb_lock);

                        if (!ret)
                                goto add;

                        p = &(*p)->rb_right;
                }
        }

        c = alloc_chunk();
        RB_CLEAR_NODE(&c->rb_node);
        c->count = 0;
        memcpy(c->hash, i->hash, sizeof(i->hash));
        rb_link_node(&c->rb_node, parent, p);
        rb_insert_color(&c->rb_node, &rb_root);
add:
        add_item(c, i);
}

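/*
 * Batch-insert the items hashed by one worker. With the bloom filter
 * enabled, bloom_set() reports whether the hash was already present,
 * which is all we need to count duplicates; otherwise fall back to the
 * exact (and more memory hungry) rb-tree path.
 */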
static void insert_chunks(struct item *items, unsigned int nitems,
                          uint64_t *ndupes)
{
        int i;

        fio_mutex_down(rb_lock);

        for (i = 0; i < nitems; i++) {
                if (bloom) {
                        unsigned int s;
                        int r;

                        s = sizeof(items[i].hash) / sizeof(uint32_t);
                        r = bloom_set(bloom, items[i].hash, s);
                        *ndupes += r;
                } else
                        insert_chunk(&items[i]);
        }

        fio_mutex_up(rb_lock);
}

static void crc_buf(void *buf, uint32_t *hash)
{
        struct fio_md5_ctx ctx = { .hash = hash };

        fio_md5_init(&ctx);
        fio_md5_update(&ctx, buf, blocksize);
        fio_md5_final(&ctx);
}

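/*
 * Hash every block in this worker's current range: read each block,
 * MD5 it, and then insert the collected items in one locked batch.
 */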
static int do_work(struct worker_thread *thread, void *buf)
{
        unsigned int nblocks, i;
        off_t offset;
        int err = 0, nitems = 0;
        uint64_t ndupes = 0;
        struct item *items;

        nblocks = thread->size / blocksize;
        offset = thread->cur_offset;
        items = malloc(sizeof(*items) * nblocks);
        if (!items)
                return 1;

        for (i = 0; i < nblocks; i++) {
                if (read_block(thread->fd, buf, offset))
                        break;
                items[i].offset = offset;
                crc_buf(buf, items[i].hash);
                offset += blocksize;
                nitems++;
        }

        insert_chunks(items, nitems, &ndupes);

        free(items);
        thread->items += nitems;
        thread->dupes += ndupes;
        return err;
}

static void *thread_fn(void *data)
{
        struct worker_thread *thread = data;
        void *buf;

        buf = fio_memalign(blocksize, blocksize);

        do {
                if (get_work(&thread->cur_offset, &thread->size)) {
                        thread->err = 1;
                        break;
                }
                if (do_work(thread, buf)) {
                        thread->err = 1;
                        break;
                }
        } while (1);

        thread->done = 1;
        fio_memfree(buf, blocksize);
        return NULL;
}

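/*
 * Poll the worker threads every 250ms and print percentage done plus an
 * approximate throughput, until the first worker reports completion.
 */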
static void show_progress(struct worker_thread *threads, unsigned long total)
{
        unsigned long last_nitems = 0;
        struct timeval last_tv;

        fio_gettime(&last_tv, NULL);

        while (print_progress) {
                unsigned long this_items;
                unsigned long nitems = 0;
                uint64_t tdiff;
                float perc;
                int some_done = 0;
                int i;

                for (i = 0; i < num_threads; i++) {
                        nitems += threads[i].items;
                        some_done = threads[i].done;
                        if (some_done)
                                break;
                }

                if (some_done)
                        break;

                perc = (float) nitems / (float) total;
                perc *= 100.0;
                this_items = nitems - last_nitems;
                this_items *= blocksize;
                tdiff = mtime_since_now(&last_tv);
                if (tdiff) {
                        this_items /= tdiff;
                        printf("%3.2f%% done (%luKB/sec)\r", perc, this_items);
                        last_nitems = nitems;
                        fio_gettime(&last_tv, NULL);
                } else
                        printf("%3.2f%% done\r", perc);
                fflush(stdout);
                usleep(250000);
        }
}

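/*
 * Spawn the worker threads, wait for them to finish, and aggregate
 * their per-thread item and duplicate counts into the extent/chunk
 * totals reported to the caller.
 */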
static int run_dedupe_threads(int fd, uint64_t dev_size, uint64_t *nextents,
                              uint64_t *nchunks)
{
        struct worker_thread *threads;
        unsigned long nitems, total_items;
        int i, err = 0;

        total_size = dev_size;
        total_items = dev_size / blocksize;
        cur_offset = 0;
        size_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);

        threads = malloc(num_threads * sizeof(struct worker_thread));
        for (i = 0; i < num_threads; i++) {
                threads[i].fd = fd;
                threads[i].items = 0;
                threads[i].dupes = 0;
                threads[i].err = 0;
                threads[i].done = 0;

                err = pthread_create(&threads[i].thread, NULL, thread_fn, &threads[i]);
                if (err) {
                        log_err("fio: thread startup failed\n");
                        break;
                }
        }

        show_progress(threads, total_items);

        nitems = 0;
        *nextents = 0;
        *nchunks = 1;
        for (i = 0; i < num_threads; i++) {
                void *ret;
                pthread_join(threads[i].thread, &ret);
                nitems += threads[i].items;
                *nchunks += threads[i].dupes;
        }

        printf("Threads(%u): %lu items processed\n", num_threads, nitems);

        *nextents = nitems;
        *nchunks = nitems - *nchunks;

        fio_mutex_remove(size_lock);
        free(threads);
        return err;
}

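/*
 * Open the target, work out its size and, when the bloom filter is in
 * use, size it at 1.5 entries per block (3 * dev_size / (2 * blocksize))
 * before kicking off the scan.
 */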
static int dedupe_check(const char *filename, uint64_t *nextents,
                        uint64_t *nchunks)
{
        uint64_t dev_size;
        struct stat sb;
        int flags;

        flags = O_RDONLY;
        if (odirect)
                flags |= O_DIRECT;

        dev_fd = open(filename, flags);
        if (dev_fd == -1) {
                perror("open");
                return 1;
        }

        if (fstat(dev_fd, &sb) < 0) {
                perror("fstat");
                close(dev_fd);
                return 1;
        }

        dev_size = get_size(dev_fd, &sb);
        if (!dev_size) {
                close(dev_fd);
                return 1;
        }

        if (use_bloom) {
                uint64_t bloom_entries;

                bloom_entries = (3 * dev_size) / (blocksize * 2);
                bloom = bloom_new(bloom_entries);
        }

        printf("Will check <%s>, size <%llu>, using %u threads\n", filename, (unsigned long long) dev_size, num_threads);

        return run_dedupe_threads(dev_fd, dev_size, nextents, nchunks);
}

static void show_chunk(struct chunk *c)
{
        struct flist_head *n;
        struct extent *e;

        printf("c hash %8x %8x %8x %8x, count %lu\n", c->hash[0], c->hash[1], c->hash[2], c->hash[3], (unsigned long) c->count);
        flist_for_each(n, &c->extent_list[0]) {
                e = flist_entry(n, struct extent, list);
                printf("\toffset %llu\n", (unsigned long long) e->offset);
        }
}

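/*
 * Summarize the scan: the de-dupe factor is extents / unique extents,
 * and dedupe_percentage is the rounded percentage of extents that were
 * duplicates, suitable for feeding back into a fio job file.
 */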
static void show_stat(uint64_t nextents, uint64_t nchunks)
{
        double perc;

        printf("Extents=%lu, Unique extents=%lu\n", (unsigned long) nextents, (unsigned long) nchunks);
        printf("De-dupe factor: %3.2f\n", (double) nextents / (double) nchunks);

        perc = 1.00 - ((double) nchunks / (double) nextents);
        perc *= 100.0;
        printf("Fio setting: dedupe_percentage=%u\n", (int) (perc + 0.50));
}

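/*
 * Walk the rb-tree (non-bloom mode) to count unique chunks and total
 * extents, optionally dumping every chunk with -d.
 */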
static void iter_rb_tree(uint64_t *nextents, uint64_t *nchunks)
{
        struct rb_node *n;

        *nchunks = *nextents = 0;

        n = rb_first(&rb_root);
        if (!n)
                return;

        do {
                struct chunk *c;

                c = rb_entry(n, struct chunk, rb_node);
                (*nchunks)++;
                *nextents += c->count;

                if (dump_output)
                        show_chunk(c);

        } while ((n = rb_next(n)) != NULL);
}

static int usage(char *argv[])
{
        log_err("Check for dedupable blocks on a device/file\n\n");
        log_err("%s: [options] <device or file>\n", argv[0]);
        log_err("\t-b\tBlock size to use\n");
        log_err("\t-t\tNumber of threads to use\n");
        log_err("\t-d\tFull extent/chunk debug output\n");
        log_err("\t-o\tUse O_DIRECT\n");
        log_err("\t-c\tFull collision check\n");
        log_err("\t-B\tUse probabilistic bloom filter\n");
        log_err("\t-p\tPrint progress indicator\n");
        return 1;
}

int main(int argc, char *argv[])
{
        uint64_t nextents, nchunks;
        int c, ret;

        while ((c = getopt(argc, argv, "b:t:d:o:c:p:B:")) != -1) {
                switch (c) {
                case 'b':
                        blocksize = atoi(optarg);
                        break;
                case 't':
                        num_threads = atoi(optarg);
                        break;
                case 'd':
                        dump_output = atoi(optarg);
                        break;
                case 'o':
                        odirect = atoi(optarg);
                        break;
                case 'c':
                        collision_check = atoi(optarg);
                        break;
                case 'p':
                        print_progress = atoi(optarg);
                        break;
                case 'B':
                        use_bloom = atoi(optarg);
                        break;
                case '?':
                default:
                        return usage(argv);
                }
        }

        if (collision_check || dump_output)
                use_bloom = 0;

        if (!num_threads)
                num_threads = cpus_online();

        if (argc == optind)
                return usage(argv);

        sinit();

        rb_root = RB_ROOT;
        rb_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);

        ret = dedupe_check(argv[optind], &nextents, &nchunks);

        /* Only report stats if the scan actually ran */
        if (!ret) {
                if (!bloom)
                        iter_rb_tree(&nextents, &nchunks);

                show_stat(nextents, nchunks);
        }

        fio_mutex_remove(rb_lock);
        if (bloom)
                bloom_free(bloom);
        scleanup();
        return ret;
}