Use specified compression/pattern for verify buffers too
[fio.git] / verify.c
1/*
2 * IO verification helpers
3 */
4#include <unistd.h>
5#include <fcntl.h>
6#include <string.h>
7#include <assert.h>
8#include <pthread.h>
9#include <libgen.h>
10
11#include "fio.h"
12#include "verify.h"
13#include "trim.h"
14#include "lib/rand.h"
15#include "lib/hweight.h"
16
17#include "crc/md5.h"
18#include "crc/crc64.h"
19#include "crc/crc32.h"
20#include "crc/crc32c.h"
21#include "crc/crc16.h"
22#include "crc/crc7.h"
23#include "crc/sha256.h"
24#include "crc/sha512.h"
25#include "crc/sha1.h"
26#include "crc/xxhash.h"
27
28static void populate_hdr(struct thread_data *td, struct io_u *io_u,
29 struct verify_header *hdr, unsigned int header_num,
30 unsigned int header_len);
31
32void fill_buffer_pattern(struct thread_data *td, void *p, unsigned int len)
33{
34 fill_pattern(p, len, td->o.buffer_pattern, td->o.buffer_pattern_bytes);
35}
36
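/*
 * Fill a verify buffer. Without an explicit verify pattern, generate
 * (optionally compressible) random data from a known seed so the exact
 * contents can be regenerated at verify time; otherwise repeat the
 * configured pattern.
 */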
37void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
38 struct io_u *io_u, unsigned long seed, int use_seed)
39{
40 struct thread_options *o = &td->o;
41
42 if (!o->verify_pattern_bytes) {
43 dprint(FD_VERIFY, "fill random bytes len=%u\n", len);
44
45 if (use_seed)
46 __fill_random_buf_percentage(seed, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
47 else {
48 struct frand_state *fs = &td->verify_state;
49
50 io_u->rand_seed = fill_random_buf_percentage(fs, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
51 }
52 return;
53 }
54
55 if (io_u->buf_filled_len >= len) {
56 dprint(FD_VERIFY, "using already filled verify pattern b=%d len=%u\n",
57 o->verify_pattern_bytes, len);
58 return;
59 }
60
61 fill_pattern(p, len, o->verify_pattern, o->verify_pattern_bytes);
62 io_u->buf_filled_len = len;
63}
64
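/*
 * Size of each verified region: verify_interval, if set and no larger
 * than the buffer, otherwise the whole io_u buffer.
 */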
65static unsigned int get_hdr_inc(struct thread_data *td, struct io_u *io_u)
66{
67 unsigned int hdr_inc;
68
69 hdr_inc = io_u->buflen;
70 if (td->o.verify_interval && td->o.verify_interval <= io_u->buflen)
71 hdr_inc = td->o.verify_interval;
72
73 return hdr_inc;
74}
75
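/*
 * Fill the io_u buffer with the verify pattern (or seeded random data),
 * then stamp a verify header at the start of each region.
 */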
76static void fill_pattern_headers(struct thread_data *td, struct io_u *io_u,
77 unsigned long seed, int use_seed)
78{
79 unsigned int hdr_inc, header_num;
80 struct verify_header *hdr;
81 void *p = io_u->buf;
82
83 fill_verify_pattern(td, p, io_u->buflen, io_u, seed, use_seed);
84
85 hdr_inc = get_hdr_inc(td, io_u);
86 header_num = 0;
87 for (; p < io_u->buf + io_u->buflen; p += hdr_inc) {
88 hdr = p;
89 populate_hdr(td, io_u, hdr, header_num, hdr_inc);
90 header_num++;
91 }
92}
93
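/*
 * Swap two equally sized regions; used to move the verify header to and
 * from the verify_offset location inside a block.
 */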
94static void memswp(void *buf1, void *buf2, unsigned int len)
95{
96 char swap[200];
97
98 assert(len <= sizeof(swap));
99
100 memcpy(&swap, buf1, len);
101 memcpy(buf1, buf2, len);
102 memcpy(buf2, &swap, len);
103}
104
105static void hexdump(void *buffer, int len)
106{
107 unsigned char *p = buffer;
108 int i;
109
110 for (i = 0; i < len; i++)
111 log_err("%02x", p[i]);
112 log_err("\n");
113}
114
115/*
116 * Prepare for separation of verify_header and checksum header
117 */
118static inline unsigned int __hdr_size(int verify_type)
119{
120 unsigned int len = 0;
121
122 switch (verify_type) {
123 case VERIFY_NONE:
124 case VERIFY_NULL:
125 len = 0;
126 break;
127 case VERIFY_MD5:
128 len = sizeof(struct vhdr_md5);
129 break;
130 case VERIFY_CRC64:
131 len = sizeof(struct vhdr_crc64);
132 break;
133 case VERIFY_CRC32C:
134 case VERIFY_CRC32:
135 case VERIFY_CRC32C_INTEL:
136 len = sizeof(struct vhdr_crc32);
137 break;
138 case VERIFY_CRC16:
139 len = sizeof(struct vhdr_crc16);
140 break;
141 case VERIFY_CRC7:
142 len = sizeof(struct vhdr_crc7);
143 break;
144 case VERIFY_SHA256:
145 len = sizeof(struct vhdr_sha256);
146 break;
147 case VERIFY_SHA512:
148 len = sizeof(struct vhdr_sha512);
149 break;
150 case VERIFY_XXHASH:
151 len = sizeof(struct vhdr_xxhash);
152 break;
153 case VERIFY_META:
154 len = sizeof(struct vhdr_meta);
155 break;
156 case VERIFY_SHA1:
157 len = sizeof(struct vhdr_sha1);
158 break;
159 case VERIFY_PATTERN:
160 len = 0;
161 break;
162 default:
163 log_err("fio: unknown verify header!\n");
164 assert(0);
165 }
166
167 return len + sizeof(struct verify_header);
168}
169
170static inline unsigned int hdr_size(struct verify_header *hdr)
171{
172 return __hdr_size(hdr->verify_type);
173}
174
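/*
 * The checksum specific part of the header starts right after the
 * generic struct verify_header.
 */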
175static void *hdr_priv(struct verify_header *hdr)
176{
177 void *priv = hdr;
178
179 return priv + sizeof(struct verify_header);
180}
181
182/*
183 * Verify container, pass info to verify handlers and allow them to
184 * pass info back in case of error
185 */
186struct vcont {
187 /*
188 * Input
189 */
190 struct io_u *io_u;
191 unsigned int hdr_num;
192 struct thread_data *td;
193
194 /*
195 * Output, only valid in case of error
196 */
197 const char *name;
198 void *good_crc;
199 void *bad_crc;
200 unsigned int crc_len;
201};
202
203#define DUMP_BUF_SZ 255
204static int dump_buf_warned;
205
206static void dump_buf(char *buf, unsigned int len, unsigned long long offset,
207 const char *type, struct fio_file *f)
208{
209 char *ptr, fname[DUMP_BUF_SZ];
210 size_t buf_left = DUMP_BUF_SZ;
211 int ret, fd;
212
213 ptr = strdup(f->file_name);
214
215 fname[DUMP_BUF_SZ - 1] = '\0';
216 strncpy(fname, basename(ptr), DUMP_BUF_SZ - 1);
217
218 buf_left -= strlen(fname);
219 if (buf_left <= 0) {
220 if (!dump_buf_warned) {
221 log_err("fio: verify failure dump buffer too small\n");
222 dump_buf_warned = 1;
223 }
224 free(ptr);
225 return;
226 }
227
228 snprintf(fname + strlen(fname), buf_left, ".%llu.%s", offset, type);
229
230 fd = open(fname, O_CREAT | O_TRUNC | O_WRONLY, 0644);
231 if (fd < 0) {
232 perror("open verify buf file");
 free(ptr);
233 return;
234 }
235
236 while (len) {
237 ret = write(fd, buf, len);
238 if (!ret)
239 break;
240 else if (ret < 0) {
241 perror("write verify buf file");
242 break;
243 }
244 len -= ret;
245 buf += ret;
246 }
247
248 close(fd);
249 log_err(" %s data dumped as %s\n", type, fname);
250 free(ptr);
251}
252
253/*
254 * Dump the contents of the read block and re-generate the correct data
255 * and dump that too.
256 */
257static void dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
258{
259 struct thread_data *td = vc->td;
260 struct io_u *io_u = vc->io_u;
261 unsigned long hdr_offset;
262 struct io_u dummy;
263 void *buf;
264
265 if (!td->o.verify_dump)
266 return;
267
268 /*
269 * Dump the contents we just read off disk
270 */
271 hdr_offset = vc->hdr_num * hdr->len;
272
273 dump_buf(io_u->buf + hdr_offset, hdr->len, io_u->offset + hdr_offset,
274 "received", vc->io_u->file);
275
276 /*
277 * Allocate a new buf and re-generate the original data
278 */
279 buf = malloc(io_u->buflen);
280 dummy = *io_u;
281 dummy.buf = buf;
282 dummy.rand_seed = hdr->rand_seed;
283 dummy.buf_filled_len = 0;
284 dummy.buflen = io_u->buflen;
285
286 fill_pattern_headers(td, &dummy, hdr->rand_seed, 1);
287
288 dump_buf(buf + hdr_offset, hdr->len, io_u->offset + hdr_offset,
289 "expected", vc->io_u->file);
290 free(buf);
291}
292
293static void log_verify_failure(struct verify_header *hdr, struct vcont *vc)
294{
295 unsigned long long offset;
296
297 offset = vc->io_u->offset;
298 offset += vc->hdr_num * hdr->len;
299 log_err("%.8s: verify failed at file %s offset %llu, length %u\n",
300 vc->name, vc->io_u->file->file_name, offset, hdr->len);
301
302 if (vc->good_crc && vc->bad_crc) {
303 log_err(" Expected CRC: ");
304 hexdump(vc->good_crc, vc->crc_len);
305 log_err(" Received CRC: ");
306 hexdump(vc->bad_crc, vc->crc_len);
307 }
308
309 dump_verify_buffers(hdr, vc);
310}
311
312/*
313 * Return data area 'header_num'
314 */
315static inline void *io_u_verify_off(struct verify_header *hdr, struct vcont *vc)
316{
317 return vc->io_u->buf + vc->hdr_num * hdr->len + hdr_size(hdr);
318}
319
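/*
 * Compare the payload against the repeating verify pattern: a memcmp()
 * pass locates the mismatching chunk quickly, then a byte-wise pass
 * reports the exact offset and the bad bits.
 */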
320static int verify_io_u_pattern(struct verify_header *hdr, struct vcont *vc)
321{
322 struct thread_data *td = vc->td;
323 struct io_u *io_u = vc->io_u;
324 char *buf, *pattern;
325 unsigned int header_size = __hdr_size(td->o.verify);
326 unsigned int len, mod, i, size, pattern_size;
327
328 pattern = td->o.verify_pattern;
329 pattern_size = td->o.verify_pattern_bytes;
330 if (pattern_size <= 1)
331 pattern_size = MAX_PATTERN_SIZE;
332 buf = (void *) hdr + header_size;
333 len = get_hdr_inc(td, io_u) - header_size;
334 mod = header_size % pattern_size;
335
336 for (i = 0; i < len; i += size) {
337 size = pattern_size - mod;
338 if (size > (len - i))
339 size = len - i;
340 if (memcmp(buf + i, pattern + mod, size))
341 /* Let the slow compare find the first mismatch byte. */
342 break;
343 mod = 0;
344 }
345
346 for (; i < len; i++) {
347 if (buf[i] != pattern[mod]) {
348 unsigned int bits;
349
350 bits = hweight8(buf[i] ^ pattern[mod]);
351 log_err("fio: got pattern %x, wanted %x. Bad bits %d\n",
352 buf[i], pattern[mod], bits);
353 log_err("fio: bad pattern block offset %u\n", i);
354 dump_verify_buffers(hdr, vc);
355 return EILSEQ;
356 }
357 mod++;
358 if (mod == td->o.verify_pattern_bytes)
359 mod = 0;
360 }
361
362 return 0;
363}
364
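/*
 * Meta verify: check that the stored offset matches where the block was
 * read from, optionally verify the pattern, and check the stored write
 * count (numberio) where that is reliable (see below).
 */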
365static int verify_io_u_meta(struct verify_header *hdr, struct vcont *vc)
366{
367 struct thread_data *td = vc->td;
368 struct vhdr_meta *vh = hdr_priv(hdr);
369 struct io_u *io_u = vc->io_u;
370 int ret = EILSEQ;
371
372 dprint(FD_VERIFY, "meta verify io_u %p, len %u\n", io_u, hdr->len);
373
374 if (vh->offset == io_u->offset + vc->hdr_num * td->o.verify_interval)
375 ret = 0;
376
377 if (td->o.verify_pattern_bytes)
378 ret |= verify_io_u_pattern(hdr, vc);
379
380 /*
381 * For read-only workloads, the program cannot be certain of the
382 * last numberio written to a block. Checking of numberio will be
383 * done only for workloads that write data. For verify_only,
384 * numberio will be checked in the last iteration when the correct
385 * state of numberio, that would have been written to each block
386 * in a previous run of fio, has been reached.
387 */
388 if ((td_write(td) || td_rw(td)) && (td_min_bs(td) == td_max_bs(td)) &&
389 !td->o.time_based)
390 if (!td->o.verify_only || td->o.loops == 0)
391 if (vh->numberio != io_u->numberio)
392 ret = EILSEQ;
393
394 if (!ret)
395 return 0;
396
397 vc->name = "meta";
398 log_verify_failure(hdr, vc);
399 return ret;
400}
401
402static int verify_io_u_xxhash(struct verify_header *hdr, struct vcont *vc)
403{
404 void *p = io_u_verify_off(hdr, vc);
405 struct vhdr_xxhash *vh = hdr_priv(hdr);
406 uint32_t hash;
407 void *state;
408
409 dprint(FD_VERIFY, "xxhash verify io_u %p, len %u\n", vc->io_u, hdr->len);
410
411 state = XXH32_init(1);
412 XXH32_update(state, p, hdr->len - hdr_size(hdr));
413 hash = XXH32_digest(state);
414
415 if (vh->hash == hash)
416 return 0;
417
418 vc->name = "xxhash";
419 vc->good_crc = &vh->hash;
420 vc->bad_crc = &hash;
421 vc->crc_len = sizeof(hash);
422 log_verify_failure(hdr, vc);
423 return EILSEQ;
424}
425
426static int verify_io_u_sha512(struct verify_header *hdr, struct vcont *vc)
427{
428 void *p = io_u_verify_off(hdr, vc);
429 struct vhdr_sha512 *vh = hdr_priv(hdr);
430 uint8_t sha512[128];
431 struct fio_sha512_ctx sha512_ctx = {
432 .buf = sha512,
433 };
434
435 dprint(FD_VERIFY, "sha512 verify io_u %p, len %u\n", vc->io_u, hdr->len);
436
437 fio_sha512_init(&sha512_ctx);
438 fio_sha512_update(&sha512_ctx, p, hdr->len - hdr_size(hdr));
439
440 if (!memcmp(vh->sha512, sha512_ctx.buf, sizeof(sha512)))
441 return 0;
442
443 vc->name = "sha512";
444 vc->good_crc = vh->sha512;
445 vc->bad_crc = sha512_ctx.buf;
446 vc->crc_len = sizeof(vh->sha512);
447 log_verify_failure(hdr, vc);
448 return EILSEQ;
449}
450
451static int verify_io_u_sha256(struct verify_header *hdr, struct vcont *vc)
452{
453 void *p = io_u_verify_off(hdr, vc);
454 struct vhdr_sha256 *vh = hdr_priv(hdr);
455 uint8_t sha256[64];
456 struct fio_sha256_ctx sha256_ctx = {
457 .buf = sha256,
458 };
459
460 dprint(FD_VERIFY, "sha256 verify io_u %p, len %u\n", vc->io_u, hdr->len);
461
462 fio_sha256_init(&sha256_ctx);
463 fio_sha256_update(&sha256_ctx, p, hdr->len - hdr_size(hdr));
464
465 if (!memcmp(vh->sha256, sha256_ctx.buf, sizeof(sha256)))
466 return 0;
467
468 vc->name = "sha256";
469 vc->good_crc = vh->sha256;
470 vc->bad_crc = sha256_ctx.buf;
471 vc->crc_len = sizeof(vh->sha256);
472 log_verify_failure(hdr, vc);
473 return EILSEQ;
474}
475
476static int verify_io_u_sha1(struct verify_header *hdr, struct vcont *vc)
477{
478 void *p = io_u_verify_off(hdr, vc);
479 struct vhdr_sha1 *vh = hdr_priv(hdr);
480 uint32_t sha1[5];
481 struct fio_sha1_ctx sha1_ctx = {
482 .H = sha1,
483 };
484
485 dprint(FD_VERIFY, "sha1 verify io_u %p, len %u\n", vc->io_u, hdr->len);
486
487 fio_sha1_init(&sha1_ctx);
488 fio_sha1_update(&sha1_ctx, p, hdr->len - hdr_size(hdr));
489
490 if (!memcmp(vh->sha1, sha1_ctx.H, sizeof(sha1)))
491 return 0;
492
493 vc->name = "sha1";
494 vc->good_crc = vh->sha1;
495 vc->bad_crc = sha1_ctx.H;
496 vc->crc_len = sizeof(vh->sha1);
497 log_verify_failure(hdr, vc);
498 return EILSEQ;
499}
500
501static int verify_io_u_crc7(struct verify_header *hdr, struct vcont *vc)
502{
503 void *p = io_u_verify_off(hdr, vc);
504 struct vhdr_crc7 *vh = hdr_priv(hdr);
505 unsigned char c;
506
507 dprint(FD_VERIFY, "crc7 verify io_u %p, len %u\n", vc->io_u, hdr->len);
508
509 c = fio_crc7(p, hdr->len - hdr_size(hdr));
510
511 if (c == vh->crc7)
512 return 0;
513
514 vc->name = "crc7";
515 vc->good_crc = &vh->crc7;
516 vc->bad_crc = &c;
517 vc->crc_len = 1;
518 log_verify_failure(hdr, vc);
519 return EILSEQ;
520}
521
522static int verify_io_u_crc16(struct verify_header *hdr, struct vcont *vc)
523{
524 void *p = io_u_verify_off(hdr, vc);
525 struct vhdr_crc16 *vh = hdr_priv(hdr);
526 unsigned short c;
527
528 dprint(FD_VERIFY, "crc16 verify io_u %p, len %u\n", vc->io_u, hdr->len);
529
530 c = fio_crc16(p, hdr->len - hdr_size(hdr));
531
532 if (c == vh->crc16)
533 return 0;
534
535 vc->name = "crc16";
536 vc->good_crc = &vh->crc16;
537 vc->bad_crc = &c;
538 vc->crc_len = 2;
539 log_verify_failure(hdr, vc);
540 return EILSEQ;
541}
542
543static int verify_io_u_crc64(struct verify_header *hdr, struct vcont *vc)
544{
545 void *p = io_u_verify_off(hdr, vc);
546 struct vhdr_crc64 *vh = hdr_priv(hdr);
547 unsigned long long c;
548
549 dprint(FD_VERIFY, "crc64 verify io_u %p, len %u\n", vc->io_u, hdr->len);
550
551 c = fio_crc64(p, hdr->len - hdr_size(hdr));
552
553 if (c == vh->crc64)
554 return 0;
555
556 vc->name = "crc64";
557 vc->good_crc = &vh->crc64;
558 vc->bad_crc = &c;
559 vc->crc_len = 8;
560 log_verify_failure(hdr, vc);
561 return EILSEQ;
562}
563
564static int verify_io_u_crc32(struct verify_header *hdr, struct vcont *vc)
565{
566 void *p = io_u_verify_off(hdr, vc);
567 struct vhdr_crc32 *vh = hdr_priv(hdr);
568 uint32_t c;
569
570 dprint(FD_VERIFY, "crc32 verify io_u %p, len %u\n", vc->io_u, hdr->len);
571
572 c = fio_crc32(p, hdr->len - hdr_size(hdr));
573
574 if (c == vh->crc32)
575 return 0;
576
577 vc->name = "crc32";
578 vc->good_crc = &vh->crc32;
579 vc->bad_crc = &c;
580 vc->crc_len = 4;
581 log_verify_failure(hdr, vc);
582 return EILSEQ;
583}
584
585static int verify_io_u_crc32c(struct verify_header *hdr, struct vcont *vc)
586{
587 void *p = io_u_verify_off(hdr, vc);
588 struct vhdr_crc32 *vh = hdr_priv(hdr);
589 uint32_t c;
590
591 dprint(FD_VERIFY, "crc32c verify io_u %p, len %u\n", vc->io_u, hdr->len);
592
593 c = fio_crc32c(p, hdr->len - hdr_size(hdr));
594
595 if (c == vh->crc32)
596 return 0;
597
598 vc->name = "crc32c";
599 vc->good_crc = &vh->crc32;
600 vc->bad_crc = &c;
601 vc->crc_len = 4;
602 log_verify_failure(hdr, vc);
603 return EILSEQ;
604}
605
606static int verify_io_u_md5(struct verify_header *hdr, struct vcont *vc)
607{
608 void *p = io_u_verify_off(hdr, vc);
609 struct vhdr_md5 *vh = hdr_priv(hdr);
610 uint32_t hash[MD5_HASH_WORDS];
611 struct fio_md5_ctx md5_ctx = {
612 .hash = hash,
613 };
614
615 dprint(FD_VERIFY, "md5 verify io_u %p, len %u\n", vc->io_u, hdr->len);
616
617 fio_md5_init(&md5_ctx);
618 fio_md5_update(&md5_ctx, p, hdr->len - hdr_size(hdr));
619
620 if (!memcmp(vh->md5_digest, md5_ctx.hash, sizeof(hash)))
621 return 0;
622
623 vc->name = "md5";
624 vc->good_crc = vh->md5_digest;
625 vc->bad_crc = md5_ctx.hash;
626 vc->crc_len = sizeof(hash);
627 log_verify_failure(hdr, vc);
628 return EILSEQ;
629}
630
631/*
632 * Push IO verification to a separate thread
633 */
634int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
635{
636 struct io_u *io_u = *io_u_ptr;
637
638 pthread_mutex_lock(&td->io_u_lock);
639
640 if (io_u->file)
641 put_file_log(td, io_u->file);
642
643 if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
644 td->cur_depth--;
645 io_u->flags &= ~IO_U_F_IN_CUR_DEPTH;
646 }
647 flist_add_tail(&io_u->verify_list, &td->verify_list);
648 *io_u_ptr = NULL;
649 pthread_mutex_unlock(&td->io_u_lock);
650
651 pthread_cond_signal(&td->verify_cond);
652 return 0;
653}
654
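/*
 * With trim_zero set, a trimmed block must read back as all zeroes.
 */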
655static int verify_trimmed_io_u(struct thread_data *td, struct io_u *io_u)
656{
657 static char zero_buf[1024];
658 unsigned int this_len, len;
659 int ret = 0;
660 void *p;
661
662 if (!td->o.trim_zero)
663 return 0;
664
665 len = io_u->buflen;
666 p = io_u->buf;
667 do {
668 this_len = sizeof(zero_buf);
669 if (this_len > len)
670 this_len = len;
671 if (memcmp(p, zero_buf, this_len)) {
672 ret = EILSEQ;
673 break;
674 }
675 len -= this_len;
676 p += this_len;
677 } while (len);
678
679 if (!ret)
680 return 0;
681
682 log_err("trim: verify failed at file %s offset %llu, length %lu"
683 ", block offset %lu\n",
684 io_u->file->file_name, io_u->offset, io_u->buflen,
685 (unsigned long) (p - io_u->buf));
686 return ret;
687}
688
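/*
 * Sanity check the generic header (magic, length, rand_seed and the
 * header's own crc32c) before doing any checksum verification.
 */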
689static int verify_header(struct io_u *io_u, struct verify_header *hdr,
690 unsigned int hdr_num, unsigned int hdr_len)
691{
692 void *p = hdr;
693 uint32_t crc;
694
695 if (hdr->magic != FIO_HDR_MAGIC) {
696 log_err("verify: bad magic header %x, wanted %x",
697 hdr->magic, FIO_HDR_MAGIC);
698 goto err;
699 }
700 if (hdr->len != hdr_len) {
701 log_err("verify: bad header length %u, wanted %u",
702 hdr->len, hdr_len);
703 goto err;
704 }
705 if (hdr->rand_seed != io_u->rand_seed) {
706 log_err("verify: bad header rand_seed %"PRIu64
707 ", wanted %"PRIu64,
708 hdr->rand_seed, io_u->rand_seed);
709 goto err;
710 }
711
712 crc = fio_crc32c(p, offsetof(struct verify_header, crc32));
713 if (crc != hdr->crc32) {
714 log_err("verify: bad header crc %x, calculated %x",
715 hdr->crc32, crc);
716 goto err;
717 }
718 return 0;
719
720err:
721 log_err(" at file %s offset %llu, length %u\n",
722 io_u->file->file_name,
723 io_u->offset + hdr_num * hdr_len, hdr_len);
724 return EILSEQ;
725}
726
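/*
 * Verify one completed read: walk the buffer in hdr_inc sized regions,
 * validate each header, then run the configured (or header-advertised)
 * checksum on the region.
 */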
727int verify_io_u(struct thread_data *td, struct io_u **io_u_ptr)
728{
729 struct verify_header *hdr;
730 struct io_u *io_u = *io_u_ptr;
731 unsigned int header_size, hdr_inc, hdr_num = 0;
732 void *p;
733 int ret;
734
735 if (td->o.verify == VERIFY_NULL || io_u->ddir != DDIR_READ)
736 return 0;
737 /*
738 * If the IO engine is faking IO (like null), then just pretend
739 * we verified everything.
740 */
741 if (td->io_ops->flags & FIO_FAKEIO)
742 return 0;
743
744 if (io_u->flags & IO_U_F_TRIMMED) {
745 ret = verify_trimmed_io_u(td, io_u);
746 goto done;
747 }
748
749 hdr_inc = get_hdr_inc(td, io_u);
750
751 ret = 0;
752 for (p = io_u->buf; p < io_u->buf + io_u->buflen;
753 p += hdr_inc, hdr_num++) {
754 struct vcont vc = {
755 .io_u = io_u,
756 .hdr_num = hdr_num,
757 .td = td,
758 };
759 unsigned int verify_type;
760
761 if (ret && td->o.verify_fatal)
762 break;
763
764 header_size = __hdr_size(td->o.verify);
765 if (td->o.verify_offset)
766 memswp(p, p + td->o.verify_offset, header_size);
767 hdr = p;
768
769 /*
770 * Make rand_seed check pass when have verifysort or
771 * verify_backlog.
772 */
773 if (td->o.verifysort || (td->flags & TD_F_VER_BACKLOG))
774 io_u->rand_seed = hdr->rand_seed;
775
776 ret = verify_header(io_u, hdr, hdr_num, hdr_inc);
777 if (ret)
778 return ret;
779
780 if (td->o.verify != VERIFY_NONE)
781 verify_type = td->o.verify;
782 else
783 verify_type = hdr->verify_type;
784
785 switch (verify_type) {
786 case VERIFY_MD5:
787 ret = verify_io_u_md5(hdr, &vc);
788 break;
789 case VERIFY_CRC64:
790 ret = verify_io_u_crc64(hdr, &vc);
791 break;
792 case VERIFY_CRC32C:
793 case VERIFY_CRC32C_INTEL:
794 ret = verify_io_u_crc32c(hdr, &vc);
795 break;
796 case VERIFY_CRC32:
797 ret = verify_io_u_crc32(hdr, &vc);
798 break;
799 case VERIFY_CRC16:
800 ret = verify_io_u_crc16(hdr, &vc);
801 break;
802 case VERIFY_CRC7:
803 ret = verify_io_u_crc7(hdr, &vc);
804 break;
805 case VERIFY_SHA256:
806 ret = verify_io_u_sha256(hdr, &vc);
807 break;
808 case VERIFY_SHA512:
809 ret = verify_io_u_sha512(hdr, &vc);
810 break;
811 case VERIFY_XXHASH:
812 ret = verify_io_u_xxhash(hdr, &vc);
813 break;
814 case VERIFY_META:
815 ret = verify_io_u_meta(hdr, &vc);
816 break;
817 case VERIFY_SHA1:
818 ret = verify_io_u_sha1(hdr, &vc);
819 break;
820 case VERIFY_PATTERN:
821 ret = verify_io_u_pattern(hdr, &vc);
822 break;
823 default:
824 log_err("Bad verify type %u\n", hdr->verify_type);
825 ret = EINVAL;
826 }
827
828 if (ret && verify_type != hdr->verify_type)
829 log_err("fio: verify type mismatch (%u media, %u given)\n",
830 hdr->verify_type, verify_type);
831 }
832
833done:
834 if (ret && td->o.verify_fatal)
835 fio_mark_td_terminate(td);
836
837 return ret;
838}
839
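/*
 * Record thread number, submit time, write count and block offset so
 * meta verification can cross check them on read back.
 */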
840static void fill_meta(struct verify_header *hdr, struct thread_data *td,
841 struct io_u *io_u, unsigned int header_num)
842{
843 struct vhdr_meta *vh = hdr_priv(hdr);
844
845 vh->thread = td->thread_number;
846
847 vh->time_sec = io_u->start_time.tv_sec;
848 vh->time_usec = io_u->start_time.tv_usec;
849
850 vh->numberio = io_u->numberio;
851
852 vh->offset = io_u->offset + header_num * td->o.verify_interval;
853}
854
855static void fill_xxhash(struct verify_header *hdr, void *p, unsigned int len)
856{
857 struct vhdr_xxhash *vh = hdr_priv(hdr);
858 void *state;
859
860 state = XXH32_init(1);
861 XXH32_update(state, p, len);
862 vh->hash = XXH32_digest(state);
863}
864
865static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
866{
867 struct vhdr_sha512 *vh = hdr_priv(hdr);
868 struct fio_sha512_ctx sha512_ctx = {
869 .buf = vh->sha512,
870 };
871
872 fio_sha512_init(&sha512_ctx);
873 fio_sha512_update(&sha512_ctx, p, len);
874}
875
876static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
877{
878 struct vhdr_sha256 *vh = hdr_priv(hdr);
879 struct fio_sha256_ctx sha256_ctx = {
880 .buf = vh->sha256,
881 };
882
883 fio_sha256_init(&sha256_ctx);
884 fio_sha256_update(&sha256_ctx, p, len);
885}
886
887static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
888{
889 struct vhdr_sha1 *vh = hdr_priv(hdr);
890 struct fio_sha1_ctx sha1_ctx = {
891 .H = vh->sha1,
892 };
893
894 fio_sha1_init(&sha1_ctx);
895 fio_sha1_update(&sha1_ctx, p, len);
896}
897
898static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
899{
900 struct vhdr_crc7 *vh = hdr_priv(hdr);
901
902 vh->crc7 = fio_crc7(p, len);
903}
904
905static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
906{
907 struct vhdr_crc16 *vh = hdr_priv(hdr);
908
909 vh->crc16 = fio_crc16(p, len);
910}
911
912static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
913{
914 struct vhdr_crc32 *vh = hdr_priv(hdr);
915
916 vh->crc32 = fio_crc32(p, len);
917}
918
919static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
920{
921 struct vhdr_crc32 *vh = hdr_priv(hdr);
922
923 vh->crc32 = fio_crc32c(p, len);
924}
925
926static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
927{
928 struct vhdr_crc64 *vh = hdr_priv(hdr);
929
930 vh->crc64 = fio_crc64(p, len);
931}
932
933static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
934{
935 struct vhdr_md5 *vh = hdr_priv(hdr);
936 struct fio_md5_ctx md5_ctx = {
937 .hash = (uint32_t *) vh->md5_digest,
938 };
939
940 fio_md5_init(&md5_ctx);
941 fio_md5_update(&md5_ctx, p, len);
942}
943
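/*
 * Write the generic verify_header (magic, type, length, seed, plus a
 * crc32c over the header itself), then the checksum of the data that
 * follows it. With verify_offset set, swap the header into place.
 */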
944static void populate_hdr(struct thread_data *td, struct io_u *io_u,
945 struct verify_header *hdr, unsigned int header_num,
946 unsigned int header_len)
947{
948 unsigned int data_len;
949 void *data, *p;
950
951 p = (void *) hdr;
952
953 hdr->magic = FIO_HDR_MAGIC;
954 hdr->verify_type = td->o.verify;
955 hdr->len = header_len;
956 hdr->rand_seed = io_u->rand_seed;
957 hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
958
959 data_len = header_len - hdr_size(hdr);
960
961 data = p + hdr_size(hdr);
962 switch (td->o.verify) {
963 case VERIFY_MD5:
964 dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
965 io_u, hdr->len);
966 fill_md5(hdr, data, data_len);
967 break;
968 case VERIFY_CRC64:
969 dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
970 io_u, hdr->len);
971 fill_crc64(hdr, data, data_len);
972 break;
973 case VERIFY_CRC32C:
974 case VERIFY_CRC32C_INTEL:
975 dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
976 io_u, hdr->len);
977 fill_crc32c(hdr, data, data_len);
978 break;
979 case VERIFY_CRC32:
980 dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
981 io_u, hdr->len);
982 fill_crc32(hdr, data, data_len);
983 break;
984 case VERIFY_CRC16:
985 dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
986 io_u, hdr->len);
987 fill_crc16(hdr, data, data_len);
988 break;
989 case VERIFY_CRC7:
990 dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
991 io_u, hdr->len);
992 fill_crc7(hdr, data, data_len);
993 break;
994 case VERIFY_SHA256:
995 dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
996 io_u, hdr->len);
997 fill_sha256(hdr, data, data_len);
998 break;
999 case VERIFY_SHA512:
1000 dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
1001 io_u, hdr->len);
1002 fill_sha512(hdr, data, data_len);
1003 break;
1004 case VERIFY_XXHASH:
1005 dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
1006 io_u, hdr->len);
1007 fill_xxhash(hdr, data, data_len);
1008 break;
1009 case VERIFY_META:
1010 dprint(FD_VERIFY, "fill meta io_u %p, len %u\n",
1011 io_u, hdr->len);
1012 fill_meta(hdr, td, io_u, header_num);
1013 break;
1014 case VERIFY_SHA1:
1015 dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
1016 io_u, hdr->len);
1017 fill_sha1(hdr, data, data_len);
1018 break;
1019 case VERIFY_PATTERN:
1020 /* nothing to do here */
1021 break;
1022 default:
1023 log_err("fio: bad verify type: %d\n", td->o.verify);
1024 assert(0);
1025 }
1026 if (td->o.verify_offset)
1027 memswp(p, p + td->o.verify_offset, hdr_size(hdr));
1028}
1029
1030/*
1031 * fill body of io_u->buf with random data and add a header with the
1032 * checksum of choice
1033 */
1034void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
1035{
1036 if (td->o.verify == VERIFY_NULL)
1037 return;
1038
1039 io_u->numberio = td->io_issues[io_u->ddir];
1040
1041 fill_pattern_headers(td, io_u, 0, 0);
1042}
1043
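/*
 * Pull the next completed write off the verify log (rb-tree or list)
 * and turn this io_u into a read of that block.
 */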
1044int get_next_verify(struct thread_data *td, struct io_u *io_u)
1045{
1046 struct io_piece *ipo = NULL;
1047
1048 /*
1049 * this io_u is from a requeue; we already filled the offsets
1050 */
1051 if (io_u->file)
1052 return 0;
1053
1054 if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
1055 struct rb_node *n = rb_first(&td->io_hist_tree);
1056
1057 ipo = rb_entry(n, struct io_piece, rb_node);
1058
1059 /*
1060 * Ensure that the associated IO has completed
1061 */
1062 read_barrier();
1063 if (ipo->flags & IP_F_IN_FLIGHT)
1064 goto nothing;
1065
1066 rb_erase(n, &td->io_hist_tree);
1067 assert(ipo->flags & IP_F_ONRB);
1068 ipo->flags &= ~IP_F_ONRB;
1069 } else if (!flist_empty(&td->io_hist_list)) {
1070 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
1071
1072 /*
1073 * Ensure that the associated IO has completed
1074 */
1075 read_barrier();
1076 if (ipo->flags & IP_F_IN_FLIGHT)
1077 goto nothing;
1078
1079 flist_del(&ipo->list);
1080 assert(ipo->flags & IP_F_ONLIST);
1081 ipo->flags &= ~IP_F_ONLIST;
1082 }
1083
1084 if (ipo) {
1085 td->io_hist_len--;
1086
1087 io_u->offset = ipo->offset;
1088 io_u->buflen = ipo->len;
1089 io_u->numberio = ipo->numberio;
1090 io_u->file = ipo->file;
1091 io_u->flags |= IO_U_F_VER_LIST;
1092
1093 if (ipo->flags & IP_F_TRIMMED)
1094 io_u->flags |= IO_U_F_TRIMMED;
1095
1096 if (!fio_file_open(io_u->file)) {
1097 int r = td_io_open_file(td, io_u->file);
1098
1099 if (r) {
1100 dprint(FD_VERIFY, "failed file %s open\n",
1101 io_u->file->file_name);
1102 return 1;
1103 }
1104 }
1105
1106 get_file(ipo->file);
1107 assert(fio_file_open(io_u->file));
1108 io_u->ddir = DDIR_READ;
1109 io_u->xfer_buf = io_u->buf;
1110 io_u->xfer_buflen = io_u->buflen;
1111
1112 remove_trim_entry(td, ipo);
1113 free(ipo);
1114 dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);
1115
1116 if (!td->o.verify_pattern_bytes) {
1117 io_u->rand_seed = __rand(&td->verify_state);
1118 if (sizeof(int) != sizeof(long *))
1119 io_u->rand_seed *= __rand(&td->verify_state);
1120 }
1121 return 0;
1122 }
1123
1124nothing:
1125 dprint(FD_VERIFY, "get_next_verify: empty\n");
1126 return 1;
1127}
1128
1129void fio_verify_init(struct thread_data *td)
1130{
1131 if (td->o.verify == VERIFY_CRC32C_INTEL ||
1132 td->o.verify == VERIFY_CRC32C) {
1133 crc32c_intel_probe();
1134 }
1135}
1136
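/*
 * Worker for verify_async: wait for io_us on td->verify_list and verify
 * them until told to exit or a fatal error occurs.
 */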
1137static void *verify_async_thread(void *data)
1138{
1139 struct thread_data *td = data;
1140 struct io_u *io_u;
1141 int ret = 0;
1142
1143 if (td->o.verify_cpumask_set &&
1144 fio_setaffinity(td->pid, td->o.verify_cpumask)) {
1145 log_err("fio: failed setting verify thread affinity\n");
1146 goto done;
1147 }
1148
1149 do {
1150 FLIST_HEAD(list);
1151
1152 read_barrier();
1153 if (td->verify_thread_exit)
1154 break;
1155
1156 pthread_mutex_lock(&td->io_u_lock);
1157
1158 while (flist_empty(&td->verify_list) &&
1159 !td->verify_thread_exit) {
1160 ret = pthread_cond_wait(&td->verify_cond,
1161 &td->io_u_lock);
1162 if (ret) {
1163 pthread_mutex_unlock(&td->io_u_lock);
1164 break;
1165 }
1166 }
1167
1168 flist_splice_init(&td->verify_list, &list);
1169 pthread_mutex_unlock(&td->io_u_lock);
1170
1171 if (flist_empty(&list))
1172 continue;
1173
1174 while (!flist_empty(&list)) {
1175 io_u = flist_first_entry(&list, struct io_u, verify_list);
1176 flist_del_init(&io_u->verify_list);
1177
1178 io_u->flags |= IO_U_F_NO_FILE_PUT;
1179 ret = verify_io_u(td, &io_u);
1180
1181 put_io_u(td, io_u);
1182 if (!ret)
1183 continue;
1184 if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
1185 update_error_count(td, ret);
1186 td_clear_error(td);
1187 ret = 0;
1188 }
1189 }
1190 } while (!ret);
1191
1192 if (ret) {
1193 td_verror(td, ret, "async_verify");
1194 if (td->o.verify_fatal)
1195 fio_mark_td_terminate(td);
1196 }
1197
1198done:
1199 pthread_mutex_lock(&td->io_u_lock);
1200 td->nr_verify_threads--;
1201 pthread_mutex_unlock(&td->io_u_lock);
1202
1203 pthread_cond_signal(&td->free_cond);
1204 return NULL;
1205}
1206
1207int verify_async_init(struct thread_data *td)
1208{
1209 int i, ret;
1210 pthread_attr_t attr;
1211
1212 pthread_attr_init(&attr);
1213 pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
1214
1215 td->verify_thread_exit = 0;
1216
1217 td->verify_threads = malloc(sizeof(pthread_t) * td->o.verify_async);
1218 for (i = 0; i < td->o.verify_async; i++) {
1219 ret = pthread_create(&td->verify_threads[i], &attr,
1220 verify_async_thread, td);
1221 if (ret) {
1222 log_err("fio: async verify creation failed: %s\n",
1223 strerror(ret));
1224 break;
1225 }
1226 ret = pthread_detach(td->verify_threads[i]);
1227 if (ret) {
1228 log_err("fio: async verify thread detach failed: %s\n",
1229 strerror(ret));
1230 break;
1231 }
1232 td->nr_verify_threads++;
1233 }
1234
1235 pthread_attr_destroy(&attr);
1236
1237 if (i != td->o.verify_async) {
1238 log_err("fio: only %d verify threads started, exiting\n", i);
1239 td->verify_thread_exit = 1;
1240 write_barrier();
1241 pthread_cond_broadcast(&td->verify_cond);
1242 return 1;
1243 }
1244
1245 return 0;
1246}
1247
1248void verify_async_exit(struct thread_data *td)
1249{
1250 td->verify_thread_exit = 1;
1251 write_barrier();
1252 pthread_cond_broadcast(&td->verify_cond);
1253
1254 pthread_mutex_lock(&td->io_u_lock);
1255
1256 while (td->nr_verify_threads)
1257 pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1258
1259 pthread_mutex_unlock(&td->io_u_lock);
1260 free(td->verify_threads);
1261 td->verify_threads = NULL;
1262}
1263
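/*
 * Snapshot the last completed writes of each selected thread, so a
 * later invocation can tell which blocks are safe to verify.
 */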
1264struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
1265{
1266 struct all_io_list *rep;
1267 struct thread_data *td;
1268 size_t depth;
1269 void *next;
1270 int i, nr;
1271
1272 compiletime_assert(sizeof(struct all_io_list) == 8, "all_io_list");
1273
1274 /*
1275 * Calculate reply space needed. We need one 'io_state' per thread,
1276 * and the size will vary depending on depth.
1277 */
1278 depth = 0;
1279 nr = 0;
1280 for_each_td(td, i) {
1281 if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
1282 continue;
1283 td->stop_io = 1;
1284 td->flags |= TD_F_VSTATE_SAVED;
1285 depth += td->o.iodepth;
1286 nr++;
1287 }
1288
1289 if (!nr)
1290 return NULL;
1291
1292 *sz = sizeof(*rep);
1293 *sz += nr * sizeof(struct thread_io_list);
1294 *sz += depth * sizeof(uint64_t);
1295 rep = malloc(*sz);
1296
1297 rep->threads = cpu_to_le64((uint64_t) nr);
1298
1299 next = &rep->state[0];
1300 for_each_td(td, i) {
1301 struct thread_io_list *s = next;
1302 unsigned int comps;
1303
1304 if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
1305 continue;
1306
1307 if (td->last_write_comp) {
1308 int j, k;
1309
1310 if (td->io_blocks[DDIR_WRITE] < td->o.iodepth)
1311 comps = td->io_blocks[DDIR_WRITE];
1312 else
1313 comps = td->o.iodepth;
1314
1315 k = td->last_write_idx - 1;
1316 for (j = 0; j < comps; j++) {
1317 if (k == -1)
1318 k = td->o.iodepth - 1;
1319 s->offsets[j] = cpu_to_le64(td->last_write_comp[k]);
1320 k--;
1321 }
1322 } else
1323 comps = 0;
1324
1325 s->no_comps = cpu_to_le64((uint64_t) comps);
1326 s->depth = cpu_to_le64((uint64_t) td->o.iodepth);
1327 s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
1328 s->index = cpu_to_le64((uint64_t) i);
1329 s->rand.s[0] = cpu_to_le32(td->random_state.s1);
1330 s->rand.s[1] = cpu_to_le32(td->random_state.s2);
1331 s->rand.s[2] = cpu_to_le32(td->random_state.s3);
1332 s->rand.s[3] = 0;
1333 strncpy((char *) s->name, td->o.name, sizeof(s->name));
1334 next = io_list_next(s);
1335 }
1336
1337 return rep;
1338}
1339
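/*
 * Open the on-disk verify state file for this job, either to write a
 * new state or to read an existing one back.
 */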
1340static int open_state_file(const char *name, const char *prefix, int num,
1341 int for_write)
1342{
1343 char out[64];
1344 int flags;
1345 int fd;
1346
1347 if (for_write)
1348 flags = O_CREAT | O_TRUNC | O_WRONLY | O_SYNC;
1349 else
1350 flags = O_RDONLY;
1351
1352 verify_state_gen_name(out, sizeof(out), name, prefix, num);
1353
1354 fd = open(out, flags, 0644);
1355 if (fd == -1) {
1356 perror("fio: open state file");
1357 return -1;
1358 }
1359
1360 return fd;
1361}
1362
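/*
 * Write one thread's verify state, preceded by a small header carrying
 * version, payload size and a crc32c of the payload.
 */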
1363static int write_thread_list_state(struct thread_io_list *s,
1364 const char *prefix)
1365{
1366 struct verify_state_hdr hdr;
1367 uint64_t crc;
1368 ssize_t ret;
1369 int fd;
1370
1371 fd = open_state_file((const char *) s->name, prefix, s->index, 1);
1372 if (fd == -1)
1373 return 1;
1374
1375 crc = fio_crc32c((void *)s, thread_io_list_sz(s));
1376
1377 hdr.version = cpu_to_le64((uint64_t) VSTATE_HDR_VERSION);
1378 hdr.size = cpu_to_le64((uint64_t) thread_io_list_sz(s));
1379 hdr.crc = cpu_to_le64(crc);
1380 ret = write(fd, &hdr, sizeof(hdr));
1381 if (ret != sizeof(hdr))
1382 goto write_fail;
1383
1384 ret = write(fd, s, thread_io_list_sz(s));
1385 if (ret != thread_io_list_sz(s)) {
1386write_fail:
1387 if (ret < 0)
1388 perror("fio: write state file");
1389 log_err("fio: failed to write state file\n");
1390 ret = 1;
1391 } else
1392 ret = 0;
1393
1394 close(fd);
1395 return ret;
1396}
1397
1398void __verify_save_state(struct all_io_list *state, const char *prefix)
1399{
1400 struct thread_io_list *s = &state->state[0];
1401 unsigned int i;
1402
1403 for (i = 0; i < le64_to_cpu(state->threads); i++) {
1404 write_thread_list_state(s, prefix);
1405 s = io_list_next(s);
1406 }
1407}
1408
1409void verify_save_state(void)
1410{
1411 struct all_io_list *state;
1412 size_t sz;
1413
1414 state = get_all_io_list(IO_LIST_ALL, &sz);
1415 if (state) {
1416 __verify_save_state(state, "local");
1417 free(state);
1418 }
1419}
1420
1421void verify_free_state(struct thread_data *td)
1422{
1423 if (td->vstate)
1424 free(td->vstate);
1425}
1426
1427void verify_convert_assign_state(struct thread_data *td,
1428 struct thread_io_list *s)
1429{
1430 int i;
1431
1432 s->no_comps = le64_to_cpu(s->no_comps);
1433 s->depth = le64_to_cpu(s->depth);
1434 s->numberio = le64_to_cpu(s->numberio);
1435 for (i = 0; i < 4; i++)
1436 s->rand.s[i] = le32_to_cpu(s->rand.s[i]);
1437 for (i = 0; i < s->no_comps; i++)
1438 s->offsets[i] = le64_to_cpu(s->offsets[i]);
1439
1440 td->vstate = s;
1441}
1442
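/*
 * Convert a state header to host endianness and check that version and
 * payload crc match.
 */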
1443int verify_state_hdr(struct verify_state_hdr *hdr, struct thread_io_list *s)
1444{
1445 uint64_t crc;
1446
1447 hdr->version = le64_to_cpu(hdr->version);
1448 hdr->size = le64_to_cpu(hdr->size);
1449 hdr->crc = le64_to_cpu(hdr->crc);
1450
1451 if (hdr->version != VSTATE_HDR_VERSION)
1452 return 1;
1453
1454 crc = fio_crc32c((void *)s, hdr->size);
1455 if (crc != hdr->crc)
1456 return 1;
1457
1458 return 0;
1459}
1460
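/*
 * Read a previously saved verify state file for this thread, validate
 * its header and crc, and attach the state to the thread.
 */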
1461int verify_load_state(struct thread_data *td, const char *prefix)
1462{
1463 struct thread_io_list *s = NULL;
1464 struct verify_state_hdr hdr;
1465 uint64_t crc;
1466 ssize_t ret;
1467 int fd;
1468
1469 if (!td->o.verify_state)
1470 return 0;
1471
1472 fd = open_state_file(td->o.name, prefix, td->thread_number - 1, 0);
1473 if (fd == -1)
1474 return 1;
1475
1476 ret = read(fd, &hdr, sizeof(hdr));
1477 if (ret != sizeof(hdr)) {
1478 if (ret < 0)
1479 td_verror(td, errno, "read verify state hdr");
1480 log_err("fio: failed reading verify state header\n");
1481 goto err;
1482 }
1483
1484 hdr.version = le64_to_cpu(hdr.version);
1485 hdr.size = le64_to_cpu(hdr.size);
1486 hdr.crc = le64_to_cpu(hdr.crc);
1487
1488 if (hdr.version != VSTATE_HDR_VERSION) {
1489 log_err("fio: bad version in verify state header\n");
1490 goto err;
1491 }
1492
1493 s = malloc(hdr.size);
1494 ret = read(fd, s, hdr.size);
1495 if (ret != hdr.size) {
1496 if (ret < 0)
1497 td_verror(td, errno, "read verify state");
1498 log_err("fio: failed reading verity state\n");
1499 goto err;
1500 }
1501
1502 crc = fio_crc32c((void *)s, hdr.size);
1503 if (crc != hdr.crc) {
1504 log_err("fio: verify state is corrupt\n");
1505 goto err;
1506 }
1507
1508 close(fd);
1509
1510 verify_convert_assign_state(td, s);
1511 return 0;
1512err:
1513 if (s)
1514 free(s);
1515 close(fd);
1516 return 1;
1517}
1518
1519/*
1520 * Use the loaded verify state to know when to stop doing verification
1521 */
1522int verify_state_should_stop(struct thread_data *td, struct io_u *io_u)
1523{
1524 struct thread_io_list *s = td->vstate;
1525 int i;
1526
1527 if (!s)
1528 return 0;
1529
1530 /*
1531 * If we haven't yet reached the window of (issues - depth), continue
1532 */
1533 if (td->io_blocks[DDIR_READ] < s->depth ||
1534 s->numberio - td->io_blocks[DDIR_READ] > s->depth)
1535 return 0;
1536
1537 /*
1538 * We're in the window of having to check if this io was
1539 * completed or not. If the IO was seen as completed, then
1540 * let's verify it.
1541 */
1542 for (i = 0; i < s->no_comps; i++)
1543 if (io_u->offset == s->offsets[i])
1544 return 0;
1545
1546 /*
1547 * Not found, we have to stop
1548 */
1549 return 1;
1550}