dedupe: print threads used up front
[fio.git] / t / dedupe.c
CommitLineData
bf481692
JA
1/*
2 * Small tool to check for dedupable blocks in a file or device. Basically
3 * just scans the filename for extents of the given size, checksums them,
4 * and orders them up.
5 */
6#include <stdio.h>
7#include <stdio.h>
8#include <unistd.h>
9#include <inttypes.h>
10#include <assert.h>
11#include <sys/types.h>
12#include <sys/stat.h>
13#include <sys/ioctl.h>
14#include <linux/fs.h>
15#include <fcntl.h>
16#include <string.h>
17
18#include "../lib/rbtree.h"
19#include "../flist.h"
20#include "../log.h"
21#include "../mutex.h"
22#include "../smalloc.h"
23#include "../minmax.h"
24#include "../crc/md5.h"
25#include "../memalign.h"
26#include "../os/os.h"
5a155943
JA
27#include "../gettime.h"
28#include "../fio_time.h"
bf481692
JA
29
/*
 * Stubs required to link against the fio library objects (log.o etc.)
 * without pulling in the rest of fio.
 */
FILE *f_err;
struct timeval *fio_tv = NULL;
unsigned int fio_debug = 0;

/* Debug-print hook declared in ../log.h; intentionally a no-op here. */
void __dprint(int type, const char *str, ...)
{
}
37
/*
 * Per-worker state. Each thread repeatedly claims a chunk of the device
 * via get_work() and hashes its blocks in do_work().
 */
struct worker_thread {
	pthread_t thread;

	volatile int done;	/* set on exit; polled by show_progress() */

	int fd;			/* scan target fd (shared, pread() only) */
	uint64_t cur_offset;	/* start of the chunk currently being scanned */
	uint64_t size;		/* size of that chunk, in bytes */

	unsigned long items;	/* blocks hashed so far (read by progress loop) */
	int err;		/* set when the work loop terminates */
};
50
/* One occurrence of a chunk: its byte offset within the scanned target. */
struct extent {
	struct flist_head list;	/* linked on the owning chunk's extent_list */
	uint64_t offset;
};
55
56struct chunk {
57 struct rb_node rb_node;
bf481692
JA
58 uint64_t count;
59 uint32_t hash[MD5_HASH_WORDS];
5a155943 60 struct flist_head extent_list[0];
bf481692
JA
61};
62
/* Per-block scratch record built by do_work() before tree insertion. */
struct item {
	uint64_t offset;
	uint32_t hash[MD5_HASH_WORDS];
};
67
static struct rb_root rb_root;		/* chunks by hash; guarded by rb_lock */
static struct fio_mutex *rb_lock;

static unsigned int blocksize = 4096;	/* -b: dedupe unit; mask logic assumes power of two */
static unsigned int num_threads;	/* -t: defaults to cpus_online() */
static unsigned int chunk_size = 1048576;	/* work unit handed out by get_work() */
static unsigned int dump_output;	/* -d: dump every chunk and extent */
static unsigned int odirect;		/* -o: open target with O_DIRECT */
static unsigned int collision_check;	/* -c: memcmp() block data on hash match */
static unsigned int print_progress = 1;	/* -p: periodic progress line */

static uint64_t total_size;		/* bytes to scan, rounded down to blocksize */
static uint64_t cur_offset;		/* next unclaimed offset; guarded by size_lock */
static struct fio_mutex *size_lock;

static int dev_fd;			/* global copy of the fd for col_check() */
84
85static uint64_t get_size(int fd, struct stat *sb)
86{
87 uint64_t ret;
88
89 if (S_ISBLK(sb->st_mode)) {
90 if (ioctl(fd, BLKGETSIZE64, &ret) < 0) {
91 perror("ioctl");
92 return 0;
93 }
94 } else
95 ret = sb->st_size;
96
97 return (ret & ~((uint64_t)blocksize - 1));
98}
99
100static int get_work(uint64_t *offset, uint64_t *size)
101{
102 uint64_t this_chunk;
103 int ret = 1;
104
105 fio_mutex_down(size_lock);
106
107 if (cur_offset < total_size) {
108 *offset = cur_offset;
109 this_chunk = min((uint64_t)chunk_size, total_size - cur_offset);
110 *size = this_chunk;
111 cur_offset += this_chunk;
112 ret = 0;
113 }
114
115 fio_mutex_up(size_lock);
116 return ret;
117}
118
119static int read_block(int fd, void *buf, off_t offset)
120{
121 ssize_t ret;
122
123 ret = pread(fd, buf, blocksize, offset);
124 if (ret < 0) {
125 perror("pread");
126 return 1;
127 } else if (!ret)
128 return 1;
129 else if (ret != blocksize) {
130 log_err("dedupe: short read on block\n");
131 return 1;
132 }
133
134 return 0;
135}
136
137static void add_item(struct chunk *c, struct item *i)
138{
87818659
JA
139 /*
140 * Save some memory and don't add extent items, if we don't
141 * use them.
142 */
143 if (dump_output || collision_check) {
144 struct extent *e;
145
146 e = malloc(sizeof(*e));
147 e->offset = i->offset;
5a155943 148 flist_add_tail(&e->list, &c->extent_list[0]);
87818659 149 }
bf481692 150
bf481692
JA
151 c->count++;
152}
153
154static int col_check(struct chunk *c, struct item *i)
155{
156 struct extent *e;
157 char *cbuf, *ibuf;
158 int ret = 1;
159
160 cbuf = fio_memalign(blocksize, blocksize);
161 ibuf = fio_memalign(blocksize, blocksize);
162
5a155943 163 e = flist_entry(c->extent_list[0].next, struct extent, list);
bf481692
JA
164 if (read_block(dev_fd, cbuf, e->offset))
165 goto out;
166
167 if (read_block(dev_fd, ibuf, i->offset))
168 goto out;
169
170 ret = memcmp(ibuf, cbuf, blocksize);
171out:
172 fio_memfree(cbuf, blocksize);
173 fio_memfree(ibuf, blocksize);
174 return ret;
175}
176
5a155943
JA
177static struct chunk *alloc_chunk(void)
178{
179 struct chunk *c;
180
181 if (collision_check || dump_output) {
182 c = malloc(sizeof(struct chunk) + sizeof(struct flist_head));
183 INIT_FLIST_HEAD(&c->extent_list[0]);
184 } else
185 c = malloc(sizeof(struct chunk));
186
187 return c;
188}
189
bf481692
JA
/*
 * Insert item i into the global chunk tree; caller must hold rb_lock.
 * On an MD5 match the item is added to the existing chunk, optionally
 * only after a full content compare; otherwise a new chunk is created.
 */
static void insert_chunk(struct item *i)
{
	struct rb_node **p, *parent;
	struct chunk *c;
	int diff;

	p = &rb_root.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		c = rb_entry(parent, struct chunk, rb_node);
		diff = memcmp(i->hash, c->hash, sizeof(i->hash));
		if (diff < 0)
			p = &(*p)->rb_left;
		else if (diff > 0)
			p = &(*p)->rb_right;
		else {
			int ret;

			/* Without -c a hash match is trusted outright. */
			if (!collision_check)
				goto add;

			/*
			 * Drop the tree lock around the (slow) disk reads.
			 * NOTE(review): other threads may mutate the tree
			 * while the lock is released, yet p/parent are
			 * reused below — confirm this is acceptable.
			 */
			fio_mutex_up(rb_lock);
			ret = col_check(c, i);
			fio_mutex_down(rb_lock);

			if (!ret)
				goto add;

			/* Same hash, different data: keep descending right. */
			p = &(*p)->rb_right;
		}
	}

	c = alloc_chunk();
	RB_CLEAR_NODE(&c->rb_node);
	c->count = 0;
	memcpy(c->hash, i->hash, sizeof(i->hash));
	rb_link_node(&c->rb_node, parent, p);
	rb_insert_color(&c->rb_node, &rb_root);
add:
	add_item(c, i);
}
233
234static void insert_chunks(struct item *items, unsigned int nitems)
235{
236 int i;
237
238 fio_mutex_down(rb_lock);
239
240 for (i = 0; i < nitems; i++)
241 insert_chunk(&items[i]);
242
243 fio_mutex_up(rb_lock);
244}
245
/* MD5 one block of data; hash must hold MD5_HASH_WORDS 32-bit words. */
static void crc_buf(void *buf, uint32_t *hash)
{
	struct fio_md5_ctx ctx = { .hash = hash };

	fio_md5_init(&ctx);
	fio_md5_update(&ctx, buf, blocksize);
	fio_md5_final(&ctx);
}
254
255static int do_work(struct worker_thread *thread, void *buf)
256{
257 unsigned int nblocks, i;
258 off_t offset;
259 int err = 0, nitems = 0;
260 struct item *items;
261
262 nblocks = thread->size / blocksize;
263 offset = thread->cur_offset;
264 items = malloc(sizeof(*items) * nblocks);
265
266 for (i = 0; i < nblocks; i++) {
267 if (read_block(thread->fd, buf, offset))
268 break;
269 items[i].offset = offset;
270 crc_buf(buf, items[i].hash);
271 offset += blocksize;
272 nitems++;
273 }
274
275 insert_chunks(items, nitems);
276 thread->items += nitems;
277 free(items);
278 return err;
279}
280
281static void *thread_fn(void *data)
282{
283 struct worker_thread *thread = data;
284 void *buf;
285
286 buf = fio_memalign(blocksize, blocksize);
287
288 do {
289 if (get_work(&thread->cur_offset, &thread->size)) {
290 thread->err = 1;
291 break;
292 }
293 if (do_work(thread, buf)) {
294 thread->err = 1;
295 break;
296 }
297 } while (1);
298
4d53d6c0 299 thread->done = 1;
bf481692
JA
300 fio_memfree(buf, blocksize);
301 return NULL;
302}
303
5a155943
JA
304static void show_progress(struct worker_thread *threads, unsigned long total)
305{
306 unsigned long last_nitems = 0;
307 struct timeval last_tv;
308
309 fio_gettime(&last_tv, NULL);
310
311 while (print_progress) {
312 unsigned long this_items;
313 unsigned long nitems = 0;
314 uint64_t tdiff;
315 float perc;
316 int some_done;
317 int i;
318
319 for (i = 0; i < num_threads; i++) {
320 nitems += threads[i].items;
321 some_done = threads[i].done;
322 if (some_done)
323 break;
324 }
325
326 if (some_done)
327 break;
328
329 perc = (float) nitems / (float) total;
330 perc *= 100.0;
331 this_items = nitems - last_nitems;
332 this_items *= blocksize;
333 tdiff = mtime_since_now(&last_tv);
334 if (tdiff) {
335 this_items /= tdiff;
336 printf("%3.2f%% done (%luKB/sec)\r", perc, this_items);
337 last_nitems = nitems;
338 fio_gettime(&last_tv, NULL);
339 } else
340 printf("%3.2f%% done\r", perc);
341 fflush(stdout);
342 usleep(250000);
343 };
344}
345
346static int run_dedupe_threads(int fd, uint64_t dev_size)
bf481692
JA
347{
348 struct worker_thread *threads;
4d53d6c0 349 unsigned long nitems, total_items;
bf481692
JA
350 int i, err = 0;
351
352 total_size = dev_size;
4d53d6c0 353 total_items = dev_size / blocksize;
bf481692
JA
354 cur_offset = 0;
355 size_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
356
357 threads = malloc(num_threads * sizeof(struct worker_thread));
358 for (i = 0; i < num_threads; i++) {
359 threads[i].fd = fd;
360 threads[i].items = 0;
361 threads[i].err = 0;
4d53d6c0 362 threads[i].done = 0;
bf481692
JA
363
364 err = pthread_create(&threads[i].thread, NULL, thread_fn, &threads[i]);
365 if (err) {
366 log_err("fio: thread startup failed\n");
367 break;
368 }
369 }
370
5a155943 371 show_progress(threads, total_items);
4d53d6c0 372
bf481692
JA
373 nitems = 0;
374 for (i = 0; i < num_threads; i++) {
375 void *ret;
376 pthread_join(threads[i].thread, &ret);
377 nitems += threads[i].items;
378 }
379
380 printf("Threads(%u): %lu items processed\n", num_threads, nitems);
381
382 fio_mutex_remove(size_lock);
5a155943 383 free(threads);
bf481692
JA
384 return err;
385}
386
387static int dedupe_check(const char *filename)
388{
389 uint64_t dev_size;
390 struct stat sb;
391 int flags;
392
393 flags = O_RDONLY;
394 if (odirect)
395 flags |= O_DIRECT;
396
397 dev_fd = open(filename, flags);
398 if (dev_fd == -1) {
399 perror("open");
400 return 1;
401 }
402
403 if (fstat(dev_fd, &sb) < 0) {
404 perror("fstat");
405 close(dev_fd);
406 return 1;
407 }
408
409 dev_size = get_size(dev_fd, &sb);
410 if (!dev_size) {
411 close(dev_fd);
412 return 1;
413 }
414
f7bd62dd 415 printf("Will check <%s>, size <%llu>, using %u threads\n", filename, (unsigned long long) dev_size, num_threads);
bf481692 416
5a155943 417 return run_dedupe_threads(dev_fd, dev_size);
bf481692
JA
418}
419
420static void show_chunk(struct chunk *c)
421{
422 struct flist_head *n;
423 struct extent *e;
424
761c2729 425 printf("c hash %8x %8x %8x %8x, count %lu\n", c->hash[0], c->hash[1], c->hash[2], c->hash[3], (unsigned long) c->count);
5a155943 426 flist_for_each(n, &c->extent_list[0]) {
bf481692 427 e = flist_entry(n, struct extent, list);
761c2729 428 printf("\toffset %llu\n", (unsigned long long) e->offset);
bf481692
JA
429 }
430}
431
432static void iter_rb_tree(void)
433{
434 struct rb_node *n;
435 uint64_t nchunks;
436 uint64_t nextents;
437 double perc;
438
439 nchunks = nextents = 0;
440
441 n = rb_first(&rb_root);
442 if (!n)
443 return;
444
445 do {
446 struct chunk *c;
447
448 c = rb_entry(n, struct chunk, rb_node);
449 nchunks++;
450 nextents += c->count;
451
452 if (dump_output)
453 show_chunk(c);
454
455 } while ((n = rb_next(n)) != NULL);
456
761c2729 457 printf("Extents=%lu, Unique extents=%lu\n", (unsigned long) nextents, (unsigned long) nchunks);
bf481692
JA
458 printf("De-dupe factor: %3.2f\n", (double) nextents / (double) nchunks);
459
460 perc = 1.00 - ((double) nchunks / (double) nextents);
461 perc *= 100.0;
a2971665 462 printf("Fio setting: dedupe_percentage=%u\n", (int) (perc + 0.50));
bf481692
JA
463}
464
/*
 * Print usage help (via log_err, i.e. to stderr) and return the exit
 * status for the caller to propagate.
 */
static int usage(char *argv[])
{
	log_err("Check for dedupable blocks on a device/file\n\n");
	log_err("%s: [options] <device or file>\n", argv[0]);
	log_err("\t-b\tChunk size to use\n");
	log_err("\t-t\tNumber of threads to use\n");
	log_err("\t-d\tFull extent/chunk debug output\n");
	log_err("\t-o\tUse O_DIRECT\n");
	log_err("\t-c\tFull collision check\n");
	log_err("\t-p\tPrint progress indicator\n");
	return 1;
}
477
478int main(int argc, char *argv[])
479{
480 int c, ret;
481
4d53d6c0 482 while ((c = getopt(argc, argv, "b:t:d:o:c:p:")) != -1) {
bf481692
JA
483 switch (c) {
484 case 'b':
485 blocksize = atoi(optarg);
486 break;
487 case 't':
488 num_threads = atoi(optarg);
489 break;
490 case 'd':
491 dump_output = atoi(optarg);
492 break;
493 case 'o':
494 odirect = atoi(optarg);
495 break;
496 case 'c':
497 collision_check = atoi(optarg);
498 break;
4d53d6c0
JA
499 case 'p':
500 print_progress = atoi(optarg);
501 break;
bf481692
JA
502 case '?':
503 default:
504 return usage(argv);
505 }
506 }
507
508 if (!num_threads)
509 num_threads = cpus_online();
510
511 if (argc == optind)
512 return usage(argv);
513
514 sinit();
515
516 rb_root = RB_ROOT;
517 rb_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
518
519 ret = dedupe_check(argv[optind]);
520
521 iter_rb_tree();
522
1956a9e5 523 fio_mutex_remove(rb_lock);
bf481692
JA
524 scleanup();
525 return ret;
526}