dedupe: improve wording in output
[fio.git] / t / dedupe.c
... / ...
CommitLineData
1/*
2 * Small tool to check for dedupable blocks in a file or device. Basically
3 * just scans the filename for extents of the given size, checksums them,
4 * and orders them up.
5 */
6#include <stdio.h>
7#include <stdio.h>
8#include <unistd.h>
9#include <inttypes.h>
10#include <assert.h>
11#include <sys/types.h>
12#include <sys/stat.h>
13#include <sys/ioctl.h>
14#include <linux/fs.h>
15#include <fcntl.h>
16#include <string.h>
17
18#include "../lib/rbtree.h"
19#include "../flist.h"
20#include "../log.h"
21#include "../mutex.h"
22#include "../smalloc.h"
23#include "../minmax.h"
24#include "../crc/md5.h"
25#include "../memalign.h"
26#include "../os/os.h"
27
/* Stubs for the fio library objects linked into this tool: they expect
 * an error stream, a timeval pointer, and a debug-print hook. */
FILE *f_err;
struct timeval *fio_tv = NULL;
unsigned int fio_debug = 0;

/* Debug-print hook required by the linked fio objects; the standalone
 * dedupe checker discards all debug output. */
void __dprint(int type, const char *str, ...)
{
}
35
/* Per-thread scan state: each worker repeatedly pulls a chunk of the
 * device via get_work() and hashes it block by block. */
struct worker_thread {
	pthread_t thread;

	int fd;			/* shared fd of the file/device being scanned */
	uint64_t cur_offset;	/* start of the chunk currently assigned */
	uint64_t size;		/* size of that chunk, in bytes */

	unsigned long items;	/* total blocks hashed by this thread */
	int err;		/* set when the thread stops (work exhausted or error) */
};
46
/* One on-disk location of a chunk's data. */
struct extent {
	struct flist_head list;	/* link on chunk->extent_list */
	uint64_t offset;	/* byte offset within the device/file */
};
51
/* A unique block of data, identified by MD5 hash, plus every offset at
 * which it occurs. Lives in the global rb tree, protected by rb_lock. */
struct chunk {
	struct rb_node rb_node;		/* node in rb_root, ordered by hash */
	struct flist_head extent_list;	/* all extents sharing this hash */
	uint64_t count;			/* number of extents on the list */
	uint32_t hash[MD5_HASH_WORDS];	/* MD5 of the block contents */
};
58
/* Per-block scan result, batched per chunk before tree insertion. */
struct item {
	uint64_t offset;		/* block's byte offset */
	uint32_t hash[MD5_HASH_WORDS];	/* MD5 of the block contents */
};
63
/* Global tree of unique chunks, keyed by MD5 hash; rb_lock protects it. */
static struct rb_root rb_root;
static struct fio_mutex *rb_lock;

static unsigned int blocksize = 4096;		/* -b: hash granularity, bytes */
static unsigned int num_threads;		/* -t: defaults to online CPUs */
static unsigned int chunk_size = 1048576;	/* bytes handed to a worker per get_work() */
static unsigned int dump_output;		/* -d: dump every chunk/extent */
static unsigned int odirect;			/* -o: open with O_DIRECT */
static unsigned int collision_check;		/* -c: byte-compare hash matches */

/* Work-distribution state, protected by size_lock. */
static uint64_t total_size;
static uint64_t cur_offset;
static struct fio_mutex *size_lock;

static int dev_fd;	/* global copy of the scan fd, used by col_check() */
79
80static uint64_t get_size(int fd, struct stat *sb)
81{
82 uint64_t ret;
83
84 if (S_ISBLK(sb->st_mode)) {
85 if (ioctl(fd, BLKGETSIZE64, &ret) < 0) {
86 perror("ioctl");
87 return 0;
88 }
89 } else
90 ret = sb->st_size;
91
92 return (ret & ~((uint64_t)blocksize - 1));
93}
94
95static int get_work(uint64_t *offset, uint64_t *size)
96{
97 uint64_t this_chunk;
98 int ret = 1;
99
100 fio_mutex_down(size_lock);
101
102 if (cur_offset < total_size) {
103 *offset = cur_offset;
104 this_chunk = min((uint64_t)chunk_size, total_size - cur_offset);
105 *size = this_chunk;
106 cur_offset += this_chunk;
107 ret = 0;
108 }
109
110 fio_mutex_up(size_lock);
111 return ret;
112}
113
114static int read_block(int fd, void *buf, off_t offset)
115{
116 ssize_t ret;
117
118 ret = pread(fd, buf, blocksize, offset);
119 if (ret < 0) {
120 perror("pread");
121 return 1;
122 } else if (!ret)
123 return 1;
124 else if (ret != blocksize) {
125 log_err("dedupe: short read on block\n");
126 return 1;
127 }
128
129 return 0;
130}
131
132static void add_item(struct chunk *c, struct item *i)
133{
134 struct extent *e;
135
136 e = malloc(sizeof(*e));
137 e->offset = i->offset;
138 flist_add_tail(&e->list, &c->extent_list);
139 c->count++;
140}
141
142static int col_check(struct chunk *c, struct item *i)
143{
144 struct extent *e;
145 char *cbuf, *ibuf;
146 int ret = 1;
147
148 cbuf = fio_memalign(blocksize, blocksize);
149 ibuf = fio_memalign(blocksize, blocksize);
150
151 e = flist_entry(c->extent_list.next, struct extent, list);
152 if (read_block(dev_fd, cbuf, e->offset))
153 goto out;
154
155 if (read_block(dev_fd, ibuf, i->offset))
156 goto out;
157
158 ret = memcmp(ibuf, cbuf, blocksize);
159out:
160 fio_memfree(cbuf, blocksize);
161 fio_memfree(ibuf, blocksize);
162 return ret;
163}
164
/*
 * Insert hashed block 'i' into the global rb tree of chunks, keyed by
 * MD5 hash. If a chunk with the same hash exists (and, with -c, the
 * data really matches), the block joins that chunk's extent list;
 * otherwise a new chunk is created. Called with rb_lock held.
 */
static void insert_chunk(struct item *i)
{
	struct rb_node **p, *parent;
	struct chunk *c;
	int diff;

	p = &rb_root.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		c = rb_entry(parent, struct chunk, rb_node);
		diff = memcmp(i->hash, c->hash, sizeof(i->hash));
		if (diff < 0)
			p = &(*p)->rb_left;
		else if (diff > 0)
			p = &(*p)->rb_right;
		else {
			int ret;

			if (!collision_check)
				goto add;

			/*
			 * Drop the tree lock around the data compare, since
			 * col_check() does blocking reads.
			 * NOTE(review): other threads may modify the tree
			 * while the lock is dropped, which could leave 'p'
			 * and 'parent' stale — confirm this is safe.
			 */
			fio_mutex_up(rb_lock);
			ret = col_check(c, i);
			fio_mutex_down(rb_lock);

			if (!ret)
				goto add;

			/* same hash, different data: keep walking right */
			p = &(*p)->rb_right;
		}
	}

	/* no matching chunk found: create one for this hash */
	c = malloc(sizeof(*c));
	RB_CLEAR_NODE(&c->rb_node);
	INIT_FLIST_HEAD(&c->extent_list);
	c->count = 0;
	memcpy(c->hash, i->hash, sizeof(i->hash));
	rb_link_node(&c->rb_node, parent, p);
	rb_insert_color(&c->rb_node, &rb_root);
add:
	add_item(c, i);
}
209
210static void insert_chunks(struct item *items, unsigned int nitems)
211{
212 int i;
213
214 fio_mutex_down(rb_lock);
215
216 for (i = 0; i < nitems; i++)
217 insert_chunk(&items[i]);
218
219 fio_mutex_up(rb_lock);
220}
221
/*
 * MD5 the 'blocksize' bytes at 'buf'; the digest lands directly in the
 * caller's 'hash' array (MD5_HASH_WORDS words), which ctx.hash points at.
 */
static void crc_buf(void *buf, uint32_t *hash)
{
	struct fio_md5_ctx ctx = { .hash = hash };

	fio_md5_init(&ctx);
	fio_md5_update(&ctx, buf, blocksize);
	fio_md5_final(&ctx);
}
230
231static int do_work(struct worker_thread *thread, void *buf)
232{
233 unsigned int nblocks, i;
234 off_t offset;
235 int err = 0, nitems = 0;
236 struct item *items;
237
238 nblocks = thread->size / blocksize;
239 offset = thread->cur_offset;
240 items = malloc(sizeof(*items) * nblocks);
241
242 for (i = 0; i < nblocks; i++) {
243 if (read_block(thread->fd, buf, offset))
244 break;
245 items[i].offset = offset;
246 crc_buf(buf, items[i].hash);
247 offset += blocksize;
248 nitems++;
249 }
250
251 insert_chunks(items, nitems);
252 thread->items += nitems;
253 free(items);
254 return err;
255}
256
257static void *thread_fn(void *data)
258{
259 struct worker_thread *thread = data;
260 void *buf;
261
262 buf = fio_memalign(blocksize, blocksize);
263
264 do {
265 if (get_work(&thread->cur_offset, &thread->size)) {
266 thread->err = 1;
267 break;
268 }
269 if (do_work(thread, buf)) {
270 thread->err = 1;
271 break;
272 }
273 } while (1);
274
275 fio_memfree(buf, blocksize);
276 return NULL;
277}
278
279static int __dedupe_check(int fd, uint64_t dev_size)
280{
281 struct worker_thread *threads;
282 unsigned long nitems;
283 int i, err = 0;
284
285 total_size = dev_size;
286 cur_offset = 0;
287 size_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
288
289 threads = malloc(num_threads * sizeof(struct worker_thread));
290 for (i = 0; i < num_threads; i++) {
291 threads[i].fd = fd;
292 threads[i].items = 0;
293 threads[i].err = 0;
294
295 err = pthread_create(&threads[i].thread, NULL, thread_fn, &threads[i]);
296 if (err) {
297 log_err("fio: thread startup failed\n");
298 break;
299 }
300 }
301
302 nitems = 0;
303 for (i = 0; i < num_threads; i++) {
304 void *ret;
305 pthread_join(threads[i].thread, &ret);
306 nitems += threads[i].items;
307 }
308
309 printf("Threads(%u): %lu items processed\n", num_threads, nitems);
310
311 fio_mutex_remove(size_lock);
312 return err;
313}
314
315static int dedupe_check(const char *filename)
316{
317 uint64_t dev_size;
318 struct stat sb;
319 int flags;
320
321 flags = O_RDONLY;
322 if (odirect)
323 flags |= O_DIRECT;
324
325 dev_fd = open(filename, flags);
326 if (dev_fd == -1) {
327 perror("open");
328 return 1;
329 }
330
331 if (fstat(dev_fd, &sb) < 0) {
332 perror("fstat");
333 close(dev_fd);
334 return 1;
335 }
336
337 dev_size = get_size(dev_fd, &sb);
338 if (!dev_size) {
339 close(dev_fd);
340 return 1;
341 }
342
343 printf("Will check <%s>, size <%lu>\n", filename, dev_size);
344
345 return __dedupe_check(dev_fd, dev_size);
346}
347
348static void show_chunk(struct chunk *c)
349{
350 struct flist_head *n;
351 struct extent *e;
352
353 printf("c hash %8x %8x %8x %8x, count %lu\n", c->hash[0], c->hash[1], c->hash[2], c->hash[3], c->count);
354 flist_for_each(n, &c->extent_list) {
355 e = flist_entry(n, struct extent, list);
356 printf("\toffset %lu\n", e->offset);
357 }
358}
359
360static void iter_rb_tree(void)
361{
362 struct rb_node *n;
363 uint64_t nchunks;
364 uint64_t nextents;
365 double perc;
366
367 nchunks = nextents = 0;
368
369 n = rb_first(&rb_root);
370 if (!n)
371 return;
372
373 do {
374 struct chunk *c;
375
376 c = rb_entry(n, struct chunk, rb_node);
377 nchunks++;
378 nextents += c->count;
379
380 if (dump_output)
381 show_chunk(c);
382
383 } while ((n = rb_next(n)) != NULL);
384
385 printf("Extents=%lu, Unique extents=%lu\n", nextents, nchunks);
386 printf("De-dupe factor: %3.2f\n", (double) nextents / (double) nchunks);
387
388 perc = 1.00 - ((double) nchunks / (double) nextents);
389 perc *= 100.0;
390 printf("Fio setting: dedupe_percentage=%u\n", (int) (perc + 0.50));
391}
392
/*
 * Print usage to the error stream; always returns 1 so callers can
 * 'return usage(argv)'.
 */
static int usage(char *argv[])
{
	log_err("Check for dedupable blocks on a device/file\n\n");
	log_err("%s: [options] <device or file>\n", argv[0]);
	/* -b sets the block size, not the chunk size: fix the wording */
	log_err("\t-b\tBlock size to use\n");
	log_err("\t-t\tNumber of threads to use\n");
	log_err("\t-d\tFull extent/chunk debug output\n");
	log_err("\t-o\tUse O_DIRECT\n");
	log_err("\t-c\tFull collision check\n");
	return 1;
}
404
/*
 * Entry point: parse options, size up the target, scan it with worker
 * threads, then walk the resulting tree and print dedupe statistics.
 */
int main(int argc, char *argv[])
{
	int c, ret;

	/* every option takes a value argument; see usage() */
	while ((c = getopt(argc, argv, "b:t:d:o:c:")) != -1) {
		switch (c) {
		case 'b':
			blocksize = atoi(optarg);
			break;
		case 't':
			num_threads = atoi(optarg);
			break;
		case 'd':
			dump_output = atoi(optarg);
			break;
		case 'o':
			odirect = atoi(optarg);
			break;
		case 'c':
			collision_check = atoi(optarg);
			break;
		case '?':
		default:
			return usage(argv);
		}
	}

	/* default to one worker per online CPU */
	if (!num_threads)
		num_threads = cpus_online();

	/* a device/file argument is mandatory */
	if (argc == optind)
		return usage(argv);

	sinit();

	rb_root = RB_ROOT;
	rb_lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);

	ret = dedupe_check(argv[optind]);

	iter_rb_tree();

	scleanup();
	return ret;
}