/*
 * Small tool to check for dedupable blocks in a file or device. Basically
 * just scans the filename for extents of the given size, checksums them,
 * and orders them up.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>
#include <assert.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <string.h>

#include "../lib/rbtree.h"
#include "../flist.h"
#include "../log.h"
#include "../mutex.h"
#include "../smalloc.h"
#include "../minmax.h"
#include "../crc/md5.h"
#include "../memalign.h"
#include "../os/os.h"

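/*
 * Minimal logging/debug hooks expected by the fio helper code linked
 * into this tool; the debug print is deliberately a no-op.
 */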
FILE *f_err;
struct timeval *fio_tv = NULL;
unsigned int fio_debug = 0;

void __dprint(int type, const char *str, ...)
{
}

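/*
 * Per-thread state: each worker repeatedly grabs a chunk of the device
 * (offset + size), scans it, and tallies how many blocks it hashed.
 */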
struct worker_thread {
	pthread_t thread;

	volatile int done;

	int fd;
	uint64_t cur_offset;
	uint64_t size;

	unsigned long items;
	int err;
};

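/*
 * A 'chunk' is a unique block identified by its MD5 hash, kept in a
 * global rbtree. Duplicate occurrences can be recorded as 'extent'
 * entries on the chunk's list. An 'item' is a scanned block (offset
 * plus hash) waiting to be inserted into the tree.
 */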
struct extent {
	struct flist_head list;
	uint64_t offset;
};

struct chunk {
	struct rb_node rb_node;
	struct flist_head extent_list;
	uint64_t count;
	uint32_t hash[MD5_HASH_WORDS];
};

struct item {
	uint64_t offset;
	uint32_t hash[MD5_HASH_WORDS];
};

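/*
 * Runtime options and shared state. Defaults: 4096 byte blocks, 1MiB
 * of work handed out per get_work() call, progress output enabled.
 */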
static struct rb_root rb_root;
static struct fio_mutex *rb_lock;

static unsigned int blocksize = 4096;
static unsigned int num_threads;
static unsigned int chunk_size = 1048576;
static unsigned int dump_output;
static unsigned int odirect;
static unsigned int collision_check;
static unsigned int print_progress = 1;

static uint64_t total_size;
static uint64_t cur_offset;
static struct fio_mutex *size_lock;

static int dev_fd;

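/*
 * Size of the file or block device, rounded down to a multiple of the
 * block size.
 */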
static uint64_t get_size(int fd, struct stat *sb)
{
	uint64_t ret;

	if (S_ISBLK(sb->st_mode)) {
		if (ioctl(fd, BLKGETSIZE64, &ret) < 0) {
			perror("ioctl");
			return 0;
		}
	} else
		ret = sb->st_size;

	return (ret & ~((uint64_t)blocksize - 1));
}

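/*
 * Hand out the next chunk of the device to scan. Returns 0 and fills
 * offset/size while work remains, 1 once the whole device is consumed.
 */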
static int get_work(uint64_t *offset, uint64_t *size)
{
	uint64_t this_chunk;
	int ret = 1;

	fio_mutex_down(size_lock);

	if (cur_offset < total_size) {
		*offset = cur_offset;
		this_chunk = min((uint64_t)chunk_size, total_size - cur_offset);
		*size = this_chunk;
		cur_offset += this_chunk;
		ret = 0;
	}

	fio_mutex_up(size_lock);
	return ret;
}

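/*
 * Read one full block at the given offset; any short read counts as a
 * failure.
 */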
static int read_block(int fd, void *buf, off_t offset)
{
	ssize_t ret;

	ret = pread(fd, buf, blocksize, offset);
	if (ret < 0) {
		perror("pread");
		return 1;
	} else if (!ret)
		return 1;
	else if (ret != blocksize) {
		log_err("dedupe: short read on block\n");
		return 1;
	}

	return 0;
}

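/*
 * Record another occurrence of a known chunk.
 */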
static void add_item(struct chunk *c, struct item *i)
{
	/*
	 * Save some memory and don't add extent items, if we don't
	 * use them.
	 */
	if (dump_output || collision_check) {
		struct extent *e;

		e = malloc(sizeof(*e));
		e->offset = i->offset;
		flist_add_tail(&e->list, &c->extent_list);
	}

	c->count++;
}

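/*
 * Full collision check: re-read the chunk's first extent and the new
 * block and compare the raw data, in case of an MD5 hash collision.
 * Returns 0 if the contents really match.
 */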
static int col_check(struct chunk *c, struct item *i)
{
	struct extent *e;
	char *cbuf, *ibuf;
	int ret = 1;

	cbuf = fio_memalign(blocksize, blocksize);
	ibuf = fio_memalign(blocksize, blocksize);

	e = flist_entry(c->extent_list.next, struct extent, list);
	if (read_block(dev_fd, cbuf, e->offset))
		goto out;

	if (read_block(dev_fd, ibuf, i->offset))
		goto out;

	ret = memcmp(ibuf, cbuf, blocksize);
out:
	fio_memfree(cbuf, blocksize);
	fio_memfree(ibuf, blocksize);
	return ret;
}

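/*
 * Insert a scanned block into the rbtree, keyed by its MD5 hash. On a
 * hash match (optionally verified by col_check()) the existing chunk's
 * count is bumped; otherwise a new chunk is created and linked in.
 */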
static void insert_chunk(struct item *i)
{
	struct rb_node **p, *parent;
	struct chunk *c;
	int diff;

	p = &rb_root.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		c = rb_entry(parent, struct chunk, rb_node);
		diff = memcmp(i->hash, c->hash, sizeof(i->hash));
		if (diff < 0)
			p = &(*p)->rb_left;
		else if (diff > 0)
			p = &(*p)->rb_right;
		else {
			int ret;

			if (!collision_check)
				goto add;

			fio_mutex_up(rb_lock);
			ret = col_check(c, i);
			fio_mutex_down(rb_lock);

			if (!ret)
				goto add;

			p = &(*p)->rb_right;
		}
	}

	c = malloc(sizeof(*c));
	RB_CLEAR_NODE(&c->rb_node);
	INIT_FLIST_HEAD(&c->extent_list);
	c->count = 0;
	memcpy(c->hash, i->hash, sizeof(i->hash));
	rb_link_node(&c->rb_node, parent, p);
	rb_insert_color(&c->rb_node, &rb_root);
add:
	add_item(c, i);
}

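/*
 * Insert a batch of scanned items under the tree lock.
 */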
static void insert_chunks(struct item *items, unsigned int nitems)
{
	unsigned int i;

	fio_mutex_down(rb_lock);

	for (i = 0; i < nitems; i++)
		insert_chunk(&items[i]);

	fio_mutex_up(rb_lock);
}

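/*
 * MD5 one block; the 128-bit digest is used as the dedupe key.
 */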
static void crc_buf(void *buf, uint32_t *hash)
{
	struct fio_md5_ctx ctx = { .hash = hash };

	fio_md5_init(&ctx);
	fio_md5_update(&ctx, buf, blocksize);
	fio_md5_final(&ctx);
}

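/*
 * Hash every block in the thread's current work chunk, then insert the
 * collected items into the tree in one batch.
 */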
static int do_work(struct worker_thread *thread, void *buf)
{
	unsigned int nblocks, i;
	off_t offset;
	int err = 0, nitems = 0;
	struct item *items;

	nblocks = thread->size / blocksize;
	offset = thread->cur_offset;
	items = malloc(sizeof(*items) * nblocks);

	for (i = 0; i < nblocks; i++) {
		if (read_block(thread->fd, buf, offset))
			break;
		items[i].offset = offset;
		crc_buf(buf, items[i].hash);
		offset += blocksize;
		nitems++;
	}

	insert_chunks(items, nitems);
	thread->items += nitems;
	free(items);
	return err;
}

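/*
 * Worker thread: keep pulling work until the device is fully scanned
 * or an error occurs.
 */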
static void *thread_fn(void *data)
{
	struct worker_thread *thread = data;
	void *buf;

	buf = fio_memalign(blocksize, blocksize);

	do {
		if (get_work(&thread->cur_offset, &thread->size)) {
			thread->err = 1;
			break;
		}
		if (do_work(thread, buf)) {
			thread->err = 1;
			break;
		}
	} while (1);

	thread->done = 1;
	fio_memfree(buf, blocksize);
	return NULL;
}

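/*
 * Spawn the worker threads, optionally print a progress indicator while
 * they run, and join them all at the end.
 */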
static int __dedupe_check(int fd, uint64_t dev_size)
{
	struct worker_thread *threads;
	unsigned long nitems, total_items;
	int i, err = 0;

	total_size = dev_size;
	total_items = dev_size / blocksize;
	cur_offset = 0;
	size_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);

	threads = malloc(num_threads * sizeof(struct worker_thread));
	for (i = 0; i < num_threads; i++) {
		threads[i].fd = fd;
		threads[i].items = 0;
		threads[i].err = 0;
		threads[i].done = 0;

		err = pthread_create(&threads[i].thread, NULL, thread_fn, &threads[i]);
		if (err) {
			log_err("fio: thread startup failed\n");
			break;
		}
	}

	while (print_progress) {
		float perc;
		int some_done = 0;

		nitems = 0;
		for (i = 0; i < num_threads; i++) {
			nitems += threads[i].items;
			some_done = threads[i].done;
			if (some_done)
				break;
		}

		if (some_done)
			break;

		perc = (float) nitems / (float) total_items;
		perc *= 100.0;
		printf("%3.2f%% done\r", perc);
		fflush(stdout);
		usleep(200000);
	}

	nitems = 0;
	for (i = 0; i < num_threads; i++) {
		void *ret;
		pthread_join(threads[i].thread, &ret);
		nitems += threads[i].items;
	}

	printf("Threads(%u): %lu items processed\n", num_threads, nitems);

	fio_mutex_remove(size_lock);
	return err;
}

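/*
 * Open the file/device, figure out how much of it to scan, and kick off
 * the parallel scan.
 */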
static int dedupe_check(const char *filename)
{
	uint64_t dev_size;
	struct stat sb;
	int flags;

	flags = O_RDONLY;
	if (odirect)
		flags |= O_DIRECT;

	dev_fd = open(filename, flags);
	if (dev_fd == -1) {
		perror("open");
		return 1;
	}

	if (fstat(dev_fd, &sb) < 0) {
		perror("fstat");
		close(dev_fd);
		return 1;
	}

	dev_size = get_size(dev_fd, &sb);
	if (!dev_size) {
		close(dev_fd);
		return 1;
	}

	printf("Will check <%s>, size <%llu>\n", filename, (unsigned long long) dev_size);

	return __dedupe_check(dev_fd, dev_size);
}

static void show_chunk(struct chunk *c)
{
	struct flist_head *n;
	struct extent *e;

	printf("c hash %8x %8x %8x %8x, count %lu\n", c->hash[0], c->hash[1], c->hash[2], c->hash[3], (unsigned long) c->count);
	flist_for_each(n, &c->extent_list) {
		e = flist_entry(n, struct extent, list);
		printf("\toffset %llu\n", (unsigned long long) e->offset);
	}
}

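/*
 * Walk the rbtree and summarize: total extents vs unique chunks, the
 * resulting de-dupe factor, and the equivalent fio dedupe_percentage
 * setting.
 */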
static void iter_rb_tree(void)
{
	struct rb_node *n;
	uint64_t nchunks;
	uint64_t nextents;
	double perc;

	nchunks = nextents = 0;

	n = rb_first(&rb_root);
	if (!n)
		return;

	do {
		struct chunk *c;

		c = rb_entry(n, struct chunk, rb_node);
		nchunks++;
		nextents += c->count;

		if (dump_output)
			show_chunk(c);

	} while ((n = rb_next(n)) != NULL);

	printf("Extents=%lu, Unique extents=%lu\n", (unsigned long) nextents, (unsigned long) nchunks);
	printf("De-dupe factor: %3.2f\n", (double) nextents / (double) nchunks);

	perc = 1.00 - ((double) nchunks / (double) nextents);
	perc *= 100.0;
	printf("Fio setting: dedupe_percentage=%u\n", (int) (perc + 0.50));
}

static int usage(char *argv[])
{
	log_err("Check for dedupable blocks on a device/file\n\n");
	log_err("%s: [options] <device or file>\n", argv[0]);
	log_err("\t-b\tBlock size to use\n");
	log_err("\t-t\tNumber of threads to use\n");
	log_err("\t-d\tFull extent/chunk debug output\n");
	log_err("\t-o\tUse O_DIRECT\n");
	log_err("\t-c\tFull collision check\n");
	log_err("\t-p\tPrint progress indicator\n");
	return 1;
}

int main(int argc, char *argv[])
{
	int c, ret;

	while ((c = getopt(argc, argv, "b:t:d:o:c:p:")) != -1) {
		switch (c) {
		case 'b':
			blocksize = atoi(optarg);
			break;
		case 't':
			num_threads = atoi(optarg);
			break;
		case 'd':
			dump_output = atoi(optarg);
			break;
		case 'o':
			odirect = atoi(optarg);
			break;
		case 'c':
			collision_check = atoi(optarg);
			break;
		case 'p':
			print_progress = atoi(optarg);
			break;
		case '?':
		default:
			return usage(argv);
		}
	}

	if (!num_threads)
		num_threads = cpus_online();

	if (argc == optind)
		return usage(argv);

	sinit();

	rb_root = RB_ROOT;
	rb_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);

	ret = dedupe_check(argv[optind]);

	iter_rb_tree();

	scleanup();
	return ret;
}