/*
 * IO verification helpers
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
#include <libgen.h>

#include "fio.h"
#include "arch/arch.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/hweight.h"
#include "lib/pattern.h"
#include "oslib/asprintf.h"

#include "crc/md5.h"
#include "crc/crc64.h"
#include "crc/crc32.h"
#include "crc/crc32c.h"
#include "crc/crc16.h"
#include "crc/crc7.h"
#include "crc/sha256.h"
#include "crc/sha512.h"
#include "crc/sha1.h"
#include "crc/xxhash.h"
#include "crc/sha3.h"
static void populate_hdr(struct thread_data *td, struct io_u *io_u,
			 struct verify_header *hdr, unsigned int header_num,
			 unsigned int header_len);
static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
		       struct verify_header *hdr, unsigned int header_num,
		       unsigned int header_len, uint64_t rand_seed);
void fill_buffer_pattern(struct thread_data *td, void *p, unsigned int len)
{
	(void)cpy_pattern(td->o.buffer_pattern, td->o.buffer_pattern_bytes, p, len);
}
static void __fill_buffer(struct thread_options *o, uint64_t seed, void *p,
			  unsigned int len)
{
	__fill_random_buf_percentage(seed, p, o->compress_percentage, len, len,
				     o->buffer_pattern, o->buffer_pattern_bytes);
}
void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
			 struct io_u *io_u, uint64_t seed, int use_seed)
{
	struct thread_options *o = &td->o;
	unsigned int interval = o->verify_pattern_interval;
	unsigned long long offset = io_u->offset;

	if (!o->verify_pattern_bytes) {
		dprint(FD_VERIFY, "fill random bytes len=%u\n", len);

		if (!use_seed) {
			seed = __rand(&td->verify_state);
			if (sizeof(int) != sizeof(long *))
				seed *= (unsigned long)__rand(&td->verify_state);
		}
		io_u->rand_seed = seed;
		__fill_buffer(o, seed, p, len);
		return;
	}

	/*
	 * Skip if we were here before and we do not need to patch the pattern
	 * with a format. However, we cannot skip if verify_offset is set,
	 * because we have swapped the header with pattern bytes.
	 */
	if (!td->o.verify_fmt_sz && io_u->buf_filled_len >= len && !td->o.verify_offset) {
		dprint(FD_VERIFY, "using already filled verify pattern b=%d len=%u\n",
			o->verify_pattern_bytes, len);
		return;
	}

	if (!interval)
		interval = len;

	/* Round the offset down to the start of the containing interval */
	io_u->offset += (p - io_u->buf) - (p - io_u->buf) % interval;
	for (unsigned int bytes_done = 0, bytes_todo = 0; bytes_done < len;
	     bytes_done += bytes_todo, p += bytes_todo, io_u->offset += interval) {
		bytes_todo = (p - io_u->buf) % interval;
		if (!bytes_todo)
			bytes_todo = interval;
		bytes_todo = min(bytes_todo, len - bytes_done);

		(void)paste_format(td->o.verify_pattern, td->o.verify_pattern_bytes,
				   td->o.verify_fmt, td->o.verify_fmt_sz,
				   p, bytes_todo, io_u);
	}

	io_u->buf_filled_len = len;
	io_u->offset = offset;
}
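/*
 * Worked example for the interval loop above (numbers are illustrative):
 * with verify_pattern_interval=4096 and a fill of len=8192 starting at
 * p == io_u->buf, the loop runs twice with bytes_todo=4096 each time,
 * bumping io_u->offset by one interval per iteration so that any
 * offset-based format specifier in the pattern is re-pasted at the start
 * of every interval.
 */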
static unsigned int get_hdr_inc(struct thread_data *td, struct io_u *io_u)
{
	unsigned int hdr_inc;

	/*
	 * If we use bs_unaligned, buflen can be larger than the verify
	 * interval (which just defaults to the smallest blocksize possible).
	 */
	hdr_inc = io_u->buflen;
	if (td->o.verify_interval && td->o.verify_interval <= io_u->buflen &&
	    !td->o.bs_unaligned)
		hdr_inc = td->o.verify_interval;

	return hdr_inc;
}
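/*
 * Example (illustrative numbers): with bs=8k and verify_interval=4k,
 * get_hdr_inc() returns 4096, so an 8k io_u carries two verify headers,
 * one at buf+0 and one at buf+4096. With bs_unaligned set, the whole
 * buffer gets a single header spanning buflen.
 */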
static void fill_pattern_headers(struct thread_data *td, struct io_u *io_u,
				 uint64_t seed, int use_seed)
{
	unsigned int hdr_inc, header_num;
	struct verify_header *hdr;
	void *p = io_u->buf;

	fill_verify_pattern(td, p, io_u->buflen, io_u, seed, use_seed);

	hdr_inc = get_hdr_inc(td, io_u);
	header_num = 0;
	for (; p < io_u->buf + io_u->buflen; p += hdr_inc) {
		hdr = p;
		populate_hdr(td, io_u, hdr, header_num, hdr_inc);
		header_num++;
	}
}
static void memswp(void *buf1, void *buf2, unsigned int len)
{
	char swap[200];

	assert(len <= sizeof(swap));

	memcpy(&swap, buf1, len);
	memcpy(buf1, buf2, len);
	memcpy(buf2, &swap, len);
}
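/*
 * memswp() backs the verify_offset option: when that option is set, the
 * verify header is swapped from the start of each block to
 * buf+verify_offset after the block is filled, and swapped back before it
 * is checked, so the header does not have to live at offset 0 on the media.
 */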
static void hexdump(void *buffer, int len)
{
	unsigned char *p = buffer;
	int i;

	for (i = 0; i < len; i++)
		log_err("%02x", p[i]);
	log_err("\n");
}
/*
 * Prepare for separation of verify_header and checksum header
 */
static inline unsigned int __hdr_size(int verify_type)
{
	unsigned int len = 0;

	switch (verify_type) {
	case VERIFY_NONE:
	case VERIFY_HDR_ONLY:
	case VERIFY_PATTERN:
		len = 0;
		break;
	case VERIFY_MD5:
		len = sizeof(struct vhdr_md5);
		break;
	case VERIFY_CRC64:
		len = sizeof(struct vhdr_crc64);
		break;
	case VERIFY_CRC32:
	case VERIFY_CRC32C:
	case VERIFY_CRC32C_INTEL:
		len = sizeof(struct vhdr_crc32);
		break;
	case VERIFY_CRC16:
		len = sizeof(struct vhdr_crc16);
		break;
	case VERIFY_CRC7:
		len = sizeof(struct vhdr_crc7);
		break;
	case VERIFY_SHA256:
		len = sizeof(struct vhdr_sha256);
		break;
	case VERIFY_SHA512:
		len = sizeof(struct vhdr_sha512);
		break;
	case VERIFY_SHA3_224:
		len = sizeof(struct vhdr_sha3_224);
		break;
	case VERIFY_SHA3_256:
		len = sizeof(struct vhdr_sha3_256);
		break;
	case VERIFY_SHA3_384:
		len = sizeof(struct vhdr_sha3_384);
		break;
	case VERIFY_SHA3_512:
		len = sizeof(struct vhdr_sha3_512);
		break;
	case VERIFY_XXHASH:
		len = sizeof(struct vhdr_xxhash);
		break;
	case VERIFY_SHA1:
		len = sizeof(struct vhdr_sha1);
		break;
	case VERIFY_PATTERN_NO_HDR:
		return 0;
	default:
		log_err("fio: unknown verify header!\n");
		assert(0);
	}

	return len + sizeof(struct verify_header);
}
static inline unsigned int hdr_size(struct thread_data *td,
				    struct verify_header *hdr)
{
	if (td->o.verify == VERIFY_PATTERN_NO_HDR)
		return 0;

	return __hdr_size(hdr->verify_type);
}
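/*
 * Example: for VERIFY_MD5 the per-block header occupies
 * sizeof(struct verify_header) + sizeof(struct vhdr_md5) bytes, while
 * VERIFY_PATTERN_NO_HDR stores no header at all, leaving the entire
 * verify interval to pattern data.
 */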
static void *hdr_priv(struct verify_header *hdr)
{
	void *priv = hdr;

	return priv + sizeof(struct verify_header);
}
/*
 * Verify container, pass info to verify handlers and allow them to
 * pass info back in case of error
 */
struct vcont {
	/*
	 * Input
	 */
	struct io_u *io_u;
	unsigned int hdr_num;
	struct thread_data *td;

	/*
	 * Output, only valid in case of error
	 */
	const char *name;
	void *good_crc;
	void *bad_crc;
	unsigned int crc_len;
};

#define DUMP_BUF_SZ	255
static void dump_buf(char *buf, unsigned int len, unsigned long long offset,
		     const char *type, struct fio_file *f)
{
	char *ptr, *fname;
	char sep[2] = { FIO_OS_PATH_SEPARATOR, 0 };
	int ret, fd;

	ptr = strdup(f->file_name);

	if (asprintf(&fname, "%s%s%s.%llu.%s", aux_path ? : "",
		     aux_path ? sep : "", basename(ptr), offset, type) < 0) {
		if (!fio_did_warn(FIO_WARN_VERIFY_BUF))
			log_err("fio: not enough memory for dump buffer filename\n");
		goto free_ptr;
	}

	fd = open(fname, O_CREAT | O_TRUNC | O_WRONLY, 0644);
	if (fd < 0) {
		perror("open verify buf file");
		goto free_fname;
	}

	while (len) {
		ret = write(fd, buf, len);
		if (!ret)
			break;
		else if (ret < 0) {
			perror("write verify buf file");
			break;
		}
		len -= ret;
		buf += ret;
	}

	close(fd);
	log_err("       %s data dumped as %s\n", type, fname);

free_fname:
	free(fname);

free_ptr:
	free(ptr);
}
/*
 * Dump the contents of the read block and re-generate the correct data
 * and dump that too.
 */
static void __dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
{
	struct thread_data *td = vc->td;
	struct io_u *io_u = vc->io_u;
	unsigned long hdr_offset;
	struct io_u dummy;
	void *buf;

	if (!td->o.verify_dump)
		return;

	/*
	 * Dump the contents we just read off disk
	 */
	hdr_offset = vc->hdr_num * hdr->len;

	dump_buf(io_u->buf + hdr_offset, hdr->len, io_u->verify_offset + hdr_offset,
			"received", vc->io_u->file);

	/*
	 * Allocate a new buf and re-generate the original data
	 */
	buf = malloc(io_u->buflen);
	dummy = *io_u;
	dummy.buf = buf;
	dummy.rand_seed = hdr->rand_seed;
	dummy.buf_filled_len = 0;
	dummy.buflen = io_u->buflen;

	fill_pattern_headers(td, &dummy, hdr->rand_seed, 1);

	dump_buf(buf + hdr_offset, hdr->len, io_u->verify_offset + hdr_offset,
			"expected", vc->io_u->file);
	free(buf);
}
static void dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
{
	struct thread_data *td = vc->td;
	struct verify_header shdr;

	if (td->o.verify == VERIFY_PATTERN_NO_HDR) {
		__fill_hdr(td, vc->io_u, &shdr, 0, vc->io_u->buflen, 0);
		hdr = &shdr;
	}

	__dump_verify_buffers(hdr, vc);
}
static void log_verify_failure(struct verify_header *hdr, struct vcont *vc)
{
	unsigned long long offset;
	unsigned int len;
	struct thread_data *td = vc->td;

	offset = vc->io_u->verify_offset;
	if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
		len = hdr->len;
		offset += (unsigned long long) vc->hdr_num * len;
	} else {
		len = vc->io_u->buflen;
	}

	log_err("%.8s: verify failed at file %s offset %llu, length %u"
			" (requested block: offset=%llu, length=%llu, flags=%x)\n",
			vc->name, vc->io_u->file->file_name, offset, len,
			vc->io_u->verify_offset, vc->io_u->buflen, vc->io_u->flags);

	if (vc->good_crc && vc->bad_crc) {
		log_err("       Expected CRC: ");
		hexdump(vc->good_crc, vc->crc_len);
		log_err("       Received CRC: ");
		hexdump(vc->bad_crc, vc->crc_len);
	}

	dump_verify_buffers(hdr, vc);
}
/*
 * Return data area 'header_num'
 */
static inline void *io_u_verify_off(struct verify_header *hdr, struct vcont *vc)
{
	return vc->io_u->buf + vc->hdr_num * hdr->len + hdr_size(vc->td, hdr);
}
static int check_pattern(char *buf, unsigned int len, unsigned int mod,
			 unsigned int pattern_size, char *pattern, unsigned int header_size)
{
	unsigned int i;
	int rc;

	rc = cmp_pattern(pattern, pattern_size, mod, buf, len);
	if (!rc)
		return 0;

	/* Slow path, compare each byte */
	for (i = 0; i < len; i++) {
		if (buf[i] != pattern[mod]) {
			unsigned int bits;

			bits = hweight8(buf[i] ^ pattern[mod]);
			log_err("fio: got pattern '%02x', wanted '%02x'. Bad bits %d\n",
				(unsigned char)buf[i],
				(unsigned char)pattern[mod],
				bits);
			log_err("fio: bad pattern block offset %u\n",
				i + header_size);
			return EILSEQ;
		}
		mod++;
		if (mod == pattern_size)
			mod = 0;
	}

	return 0;
}
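/*
 * Example of the 'mod' argument (illustrative): for a 4-byte pattern
 * 0xdeadbeef checked against a region that starts 6 bytes into the
 * pattern period, mod is 6 % 4 = 2, so the compare starts at pattern
 * byte 2 (0xbe) and wraps back to byte 0 after 0xef.
 */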
/*
 * The current thread will need its own buffer if there are multiple threads
 * and the pattern contains the offset. Fio currently only has one pattern
 * format specifier so we only need to check that one, but this may need to be
 * changed if fio ever gains more pattern format specifiers.
 */
static inline bool pattern_need_buffer(struct thread_data *td)
{
	return td->o.verify_async &&
		td->o.verify_fmt_sz &&
		td->o.verify_fmt[0].desc->paste == paste_blockoff;
}
static int verify_io_u_pattern(struct verify_header *hdr, struct vcont *vc)
{
	struct thread_data *td = vc->td;
	struct io_u *io_u = vc->io_u;
	char *buf, *pattern;
	unsigned int header_size = __hdr_size(td->o.verify);
	unsigned int len, mod, pattern_size, pattern_interval_mod, bytes_done = 0, bytes_todo;
	int rc = 0;
	unsigned long long offset = io_u->offset;

	pattern = td->o.verify_pattern;
	pattern_size = td->o.verify_pattern_bytes;
	assert(pattern_size != 0);

	/*
	 * Make this thread safe when verify_async is set and the verify
	 * pattern includes the offset.
	 */
	if (pattern_need_buffer(td)) {
		pattern = malloc(pattern_size);

		memcpy(pattern, td->o.verify_pattern, pattern_size);
	}

	if (!td->o.verify_pattern_interval) {
		(void)paste_format_inplace(pattern, pattern_size,
					   td->o.verify_fmt, td->o.verify_fmt_sz, io_u);
	}

	/*
	 * We have 3 cases here:
	 * 1. Compare the entire buffer if (1) verify_interval is not set and
	 *    (2) verify_pattern_interval is not set
	 * 2. Compare the entire *verify_interval* if (1) verify_interval *is*
	 *    set and (2) verify_pattern_interval is not set
	 * 3. Compare *verify_pattern_interval* segments or subsets thereof if
	 *    verify_pattern_interval is set
	 */
	buf = (char *) hdr + header_size;
	len = get_hdr_inc(td, io_u) - header_size;
	if (td->o.verify_pattern_interval) {
		unsigned int extent = get_hdr_inc(td, io_u) * vc->hdr_num + header_size;

		pattern_interval_mod = extent % td->o.verify_pattern_interval;
		mod = pattern_interval_mod % pattern_size;
		bytes_todo = min(len, td->o.verify_pattern_interval - pattern_interval_mod);
		io_u->offset += extent / td->o.verify_pattern_interval * td->o.verify_pattern_interval;
	} else {
		mod = (get_hdr_inc(td, io_u) * vc->hdr_num + header_size) % pattern_size;
		bytes_todo = len;
		pattern_interval_mod = 0;
	}

	while (bytes_done < len) {
		if (td->o.verify_pattern_interval) {
			(void)paste_format_inplace(pattern, pattern_size,
						   td->o.verify_fmt, td->o.verify_fmt_sz,
						   io_u);
		}

		rc = check_pattern(buf, bytes_todo, mod, pattern_size, pattern, header_size);
		if (rc) {
			vc->name = "pattern";
			log_verify_failure(hdr, vc);
			break;
		}

		mod = 0;
		bytes_done += bytes_todo;
		buf += bytes_todo;
		io_u->offset += td->o.verify_pattern_interval;
		bytes_todo = min(len - bytes_done, td->o.verify_pattern_interval);
	}

	io_u->offset = offset;
	if (pattern_need_buffer(td))
		free(pattern);

	return rc;
}
static int verify_io_u_xxhash(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_xxhash *vh = hdr_priv(hdr);
	uint32_t hash;
	void *state;

	dprint(FD_VERIFY, "xxhash verify io_u %p, len %u\n", vc->io_u, hdr->len);

	state = XXH32_init(1);
	XXH32_update(state, p, hdr->len - hdr_size(vc->td, hdr));
	hash = XXH32_digest(state);

	if (vh->hash == hash)
		return 0;

	vc->name = "xxhash";
	vc->good_crc = &vh->hash;
	vc->bad_crc = &hash;
	vc->crc_len = sizeof(hash);
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_sha3(struct verify_header *hdr, struct vcont *vc,
			    struct fio_sha3_ctx *sha3_ctx, uint8_t *sha,
			    unsigned int sha_size, const char *name)
{
	void *p = io_u_verify_off(hdr, vc);

	dprint(FD_VERIFY, "%s verify io_u %p, len %u\n", name, vc->io_u, hdr->len);

	fio_sha3_update(sha3_ctx, p, hdr->len - hdr_size(vc->td, hdr));
	fio_sha3_final(sha3_ctx);

	if (!memcmp(sha, sha3_ctx->sha, sha_size))
		return 0;

	vc->name = name;
	vc->good_crc = sha;
	vc->bad_crc = sha3_ctx->sha;
	vc->crc_len = sha_size;
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_sha3_224(struct verify_header *hdr, struct vcont *vc)
{
	struct vhdr_sha3_224 *vh = hdr_priv(hdr);
	uint8_t sha[SHA3_224_DIGEST_SIZE];
	struct fio_sha3_ctx sha3_ctx = {
		.sha = sha,
	};

	fio_sha3_224_init(&sha3_ctx);

	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
				SHA3_224_DIGEST_SIZE, "sha3-224");
}

static int verify_io_u_sha3_256(struct verify_header *hdr, struct vcont *vc)
{
	struct vhdr_sha3_256 *vh = hdr_priv(hdr);
	uint8_t sha[SHA3_256_DIGEST_SIZE];
	struct fio_sha3_ctx sha3_ctx = {
		.sha = sha,
	};

	fio_sha3_256_init(&sha3_ctx);

	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
				SHA3_256_DIGEST_SIZE, "sha3-256");
}

static int verify_io_u_sha3_384(struct verify_header *hdr, struct vcont *vc)
{
	struct vhdr_sha3_384 *vh = hdr_priv(hdr);
	uint8_t sha[SHA3_384_DIGEST_SIZE];
	struct fio_sha3_ctx sha3_ctx = {
		.sha = sha,
	};

	fio_sha3_384_init(&sha3_ctx);

	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
				SHA3_384_DIGEST_SIZE, "sha3-384");
}

static int verify_io_u_sha3_512(struct verify_header *hdr, struct vcont *vc)
{
	struct vhdr_sha3_512 *vh = hdr_priv(hdr);
	uint8_t sha[SHA3_512_DIGEST_SIZE];
	struct fio_sha3_ctx sha3_ctx = {
		.sha = sha,
	};

	fio_sha3_512_init(&sha3_ctx);

	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
				SHA3_512_DIGEST_SIZE, "sha3-512");
}
static int verify_io_u_sha512(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_sha512 *vh = hdr_priv(hdr);
	uint8_t sha512[128];
	struct fio_sha512_ctx sha512_ctx = {
		.buf = sha512,
	};

	dprint(FD_VERIFY, "sha512 verify io_u %p, len %u\n", vc->io_u, hdr->len);

	fio_sha512_init(&sha512_ctx);
	fio_sha512_update(&sha512_ctx, p, hdr->len - hdr_size(vc->td, hdr));
	fio_sha512_final(&sha512_ctx);

	if (!memcmp(vh->sha512, sha512_ctx.buf, sizeof(sha512)))
		return 0;

	vc->name = "sha512";
	vc->good_crc = vh->sha512;
	vc->bad_crc = sha512_ctx.buf;
	vc->crc_len = sizeof(vh->sha512);
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_sha256(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_sha256 *vh = hdr_priv(hdr);
	uint8_t sha256[64];
	struct fio_sha256_ctx sha256_ctx = {
		.buf = sha256,
	};

	dprint(FD_VERIFY, "sha256 verify io_u %p, len %u\n", vc->io_u, hdr->len);

	fio_sha256_init(&sha256_ctx);
	fio_sha256_update(&sha256_ctx, p, hdr->len - hdr_size(vc->td, hdr));
	fio_sha256_final(&sha256_ctx);

	if (!memcmp(vh->sha256, sha256_ctx.buf, sizeof(sha256)))
		return 0;

	vc->name = "sha256";
	vc->good_crc = vh->sha256;
	vc->bad_crc = sha256_ctx.buf;
	vc->crc_len = sizeof(vh->sha256);
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_sha1(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_sha1 *vh = hdr_priv(hdr);
	uint32_t sha1[5];
	struct fio_sha1_ctx sha1_ctx = {
		.H = sha1,
	};

	dprint(FD_VERIFY, "sha1 verify io_u %p, len %u\n", vc->io_u, hdr->len);

	fio_sha1_init(&sha1_ctx);
	fio_sha1_update(&sha1_ctx, p, hdr->len - hdr_size(vc->td, hdr));
	fio_sha1_final(&sha1_ctx);

	if (!memcmp(vh->sha1, sha1_ctx.H, sizeof(sha1)))
		return 0;

	vc->name = "sha1";
	vc->good_crc = vh->sha1;
	vc->bad_crc = sha1_ctx.H;
	vc->crc_len = sizeof(vh->sha1);
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_crc7(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_crc7 *vh = hdr_priv(hdr);
	unsigned char c;

	dprint(FD_VERIFY, "crc7 verify io_u %p, len %u\n", vc->io_u, hdr->len);

	c = fio_crc7(p, hdr->len - hdr_size(vc->td, hdr));

	if (c == vh->crc7)
		return 0;

	vc->name = "crc7";
	vc->good_crc = &vh->crc7;
	vc->bad_crc = &c;
	vc->crc_len = 1;
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_crc16(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_crc16 *vh = hdr_priv(hdr);
	unsigned short c;

	dprint(FD_VERIFY, "crc16 verify io_u %p, len %u\n", vc->io_u, hdr->len);

	c = fio_crc16(p, hdr->len - hdr_size(vc->td, hdr));

	if (c == vh->crc16)
		return 0;

	vc->name = "crc16";
	vc->good_crc = &vh->crc16;
	vc->bad_crc = &c;
	vc->crc_len = 2;
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_crc64(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_crc64 *vh = hdr_priv(hdr);
	unsigned long long c;

	dprint(FD_VERIFY, "crc64 verify io_u %p, len %u\n", vc->io_u, hdr->len);

	c = fio_crc64(p, hdr->len - hdr_size(vc->td, hdr));

	if (c == vh->crc64)
		return 0;

	vc->name = "crc64";
	vc->good_crc = &vh->crc64;
	vc->bad_crc = &c;
	vc->crc_len = 8;
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_crc32(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_crc32 *vh = hdr_priv(hdr);
	uint32_t c;

	dprint(FD_VERIFY, "crc32 verify io_u %p, len %u\n", vc->io_u, hdr->len);

	c = fio_crc32(p, hdr->len - hdr_size(vc->td, hdr));

	if (c == vh->crc32)
		return 0;

	vc->name = "crc32";
	vc->good_crc = &vh->crc32;
	vc->bad_crc = &c;
	vc->crc_len = 4;
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_crc32c(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_crc32 *vh = hdr_priv(hdr);
	uint32_t c;

	dprint(FD_VERIFY, "crc32c verify io_u %p, len %u\n", vc->io_u, hdr->len);

	c = fio_crc32c(p, hdr->len - hdr_size(vc->td, hdr));

	if (c == vh->crc32)
		return 0;

	vc->name = "crc32c";
	vc->good_crc = &vh->crc32;
	vc->bad_crc = &c;
	vc->crc_len = 4;
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
static int verify_io_u_md5(struct verify_header *hdr, struct vcont *vc)
{
	void *p = io_u_verify_off(hdr, vc);
	struct vhdr_md5 *vh = hdr_priv(hdr);
	uint32_t hash[MD5_HASH_WORDS];
	struct fio_md5_ctx md5_ctx = {
		.hash = hash,
	};

	dprint(FD_VERIFY, "md5 verify io_u %p, len %u\n", vc->io_u, hdr->len);

	fio_md5_init(&md5_ctx);
	fio_md5_update(&md5_ctx, p, hdr->len - hdr_size(vc->td, hdr));
	fio_md5_final(&md5_ctx);

	if (!memcmp(vh->md5_digest, md5_ctx.hash, sizeof(hash)))
		return 0;

	vc->name = "md5";
	vc->good_crc = vh->md5_digest;
	vc->bad_crc = md5_ctx.hash;
	vc->crc_len = sizeof(hash);
	log_verify_failure(hdr, vc);
	return EILSEQ;
}
/*
 * Push IO verification to a separate thread
 */
int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
{
	struct io_u *io_u = *io_u_ptr;

	pthread_mutex_lock(&td->io_u_lock);

	if (io_u->file)
		put_file_log(td, io_u->file);

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		io_u_clear(td, io_u, IO_U_F_IN_CUR_DEPTH);
	}
	flist_add_tail(&io_u->verify_list, &td->verify_list);
	*io_u_ptr = NULL;

	pthread_cond_signal(&td->verify_cond);
	pthread_mutex_unlock(&td->io_u_lock);
	return 0;
}
/*
 * Thanks Rusty, for spending the time so I don't have to.
 *
 * http://rusty.ozlabs.org/?p=560
 */
static int mem_is_zero(const void *data, size_t length)
{
	const unsigned char *p = data;
	size_t len;

	/* Check first 16 bytes manually */
	for (len = 0; len < 16; len++) {
		if (!length)
			return 1;
		if (*p)
			return 0;
		p++;
		length--;
	}

	/* Now we know that's zero, memcmp with self. */
	return memcmp(data, p, length) == 0;
}
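/*
 * The memcmp trick above compares the region against itself shifted by 16
 * bytes: since the first 16 bytes are known to be zero, equality forces
 * every byte to equal the byte 16 positions before it, which by induction
 * means the whole region is zero.
 */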
static int mem_is_zero_slow(const void *data, size_t length, size_t *offset)
{
	const unsigned char *p = data;

	*offset = 0;
	while (length) {
		if (*p)
			break;
		(*offset)++;
		length--;
		p++;
	}

	return !length;
}
static int verify_trimmed_io_u(struct thread_data *td, struct io_u *io_u)
{
	size_t offset;

	if (!td->o.trim_zero)
		return 0;

	if (mem_is_zero(io_u->buf, io_u->buflen))
		return 0;

	mem_is_zero_slow(io_u->buf, io_u->buflen, &offset);

	log_err("trim: verify failed at file %s offset %llu, length %llu"
		", block offset %lu\n",
			io_u->file->file_name, io_u->verify_offset, io_u->buflen,
			(unsigned long) offset);
	return EILSEQ;
}
static int verify_header(struct io_u *io_u, struct thread_data *td,
			 struct verify_header *hdr, unsigned int hdr_num,
			 unsigned int hdr_len)
{
	void *p = hdr;
	uint32_t crc;

	if (hdr->magic != FIO_HDR_MAGIC) {
		log_err("verify: bad magic header %x, wanted %x",
			hdr->magic, FIO_HDR_MAGIC);
		goto err;
	}
	if (hdr->len != hdr_len) {
		log_err("verify: bad header length %u, wanted %u",
			hdr->len, hdr_len);
		goto err;
	}
	if (td->o.verify_header_seed && (hdr->rand_seed != io_u->rand_seed)) {
		log_err("verify: bad header rand_seed %"PRIu64
			", wanted %"PRIu64,
			hdr->rand_seed, io_u->rand_seed);
		goto err;
	}
	if (hdr->offset != io_u->verify_offset + hdr_num * td->o.verify_interval) {
		log_err("verify: bad header offset %"PRIu64
			", wanted %llu",
			hdr->offset, io_u->verify_offset);
		goto err;
	}

	/*
	 * For read-only workloads, the program cannot be certain of the
	 * last numberio written to a block. Checking of numberio will be
	 * done only for workloads that write data. For verify_only or
	 * any mode de-selecting verify_write_sequence, the numberio check
	 * is skipped.
	 */
	if (td_write(td) && (td_min_bs(td) == td_max_bs(td)) &&
	    !td->o.time_based)
		if (td->o.verify_write_sequence)
			if (hdr->numberio != io_u->numberio) {
				log_err("verify: bad header numberio %"PRIu16
					", wanted %"PRIu16,
					hdr->numberio, io_u->numberio);
				goto err;
			}

	crc = fio_crc32c(p, offsetof(struct verify_header, crc32));
	if (crc != hdr->crc32) {
		log_err("verify: bad header crc %x, calculated %x",
			hdr->crc32, crc);
		goto err;
	}
	return 0;

err:
	log_err(" at file %s offset %llu, length %u"
		" (requested block: offset=%llu, length=%llu)\n",
		io_u->file->file_name,
		io_u->verify_offset + hdr_num * hdr_len, hdr_len,
		io_u->verify_offset, io_u->buflen);

	if (td->o.verify_dump)
		dump_buf(p, hdr_len, io_u->verify_offset + hdr_num * hdr_len,
				"hdr_fail", io_u->file);

	return EILSEQ;
}
int verify_io_u(struct thread_data *td, struct io_u **io_u_ptr)
{
	struct verify_header *hdr;
	struct io_u *io_u = *io_u_ptr;
	unsigned int header_size, hdr_inc, hdr_num = 0;
	void *p;
	int ret;

	if (td->o.verify == VERIFY_NULL || io_u->ddir != DDIR_READ)
		return 0;
	/*
	 * If the IO engine is faking IO (like null), then just pretend
	 * we verified everything.
	 */
	if (td_ioengine_flagged(td, FIO_FAKEIO))
		return 0;

	/*
	 * If data has already been verified from the device, we can skip
	 * the actual verification phase here.
	 */
	if (io_u->flags & IO_U_F_VER_IN_DEV)
		return 0;

	if (io_u->flags & IO_U_F_TRIMMED) {
		ret = verify_trimmed_io_u(td, io_u);
		goto done;
	}

	hdr_inc = get_hdr_inc(td, io_u);

	ret = 0;
	for (p = io_u->buf; p < io_u->buf + io_u->buflen;
	     p += hdr_inc, hdr_num++) {
		struct vcont vc = {
			.io_u		= io_u,
			.hdr_num	= hdr_num,
			.td		= td,
		};
		unsigned int verify_type;

		if (ret && td->o.verify_fatal)
			break;

		header_size = __hdr_size(td->o.verify);
		if (td->o.verify_offset)
			memswp(p, p + td->o.verify_offset, header_size);
		hdr = p;

		if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
			ret = verify_header(io_u, td, hdr, hdr_num, hdr_inc);
			if (ret)
				return ret;
		}

		if (td->o.verify != VERIFY_NONE)
			verify_type = td->o.verify;
		else
			verify_type = hdr->verify_type;

		switch (verify_type) {
		case VERIFY_HDR_ONLY:
			/* Header is always verified, check if pattern is left
			 * for verification. */
			if (td->o.verify_pattern_bytes)
				ret = verify_io_u_pattern(hdr, &vc);
			break;
		case VERIFY_MD5:
			ret = verify_io_u_md5(hdr, &vc);
			break;
		case VERIFY_CRC64:
			ret = verify_io_u_crc64(hdr, &vc);
			break;
		case VERIFY_CRC32C:
		case VERIFY_CRC32C_INTEL:
			ret = verify_io_u_crc32c(hdr, &vc);
			break;
		case VERIFY_CRC32:
			ret = verify_io_u_crc32(hdr, &vc);
			break;
		case VERIFY_CRC16:
			ret = verify_io_u_crc16(hdr, &vc);
			break;
		case VERIFY_CRC7:
			ret = verify_io_u_crc7(hdr, &vc);
			break;
		case VERIFY_SHA256:
			ret = verify_io_u_sha256(hdr, &vc);
			break;
		case VERIFY_SHA512:
			ret = verify_io_u_sha512(hdr, &vc);
			break;
		case VERIFY_SHA3_224:
			ret = verify_io_u_sha3_224(hdr, &vc);
			break;
		case VERIFY_SHA3_256:
			ret = verify_io_u_sha3_256(hdr, &vc);
			break;
		case VERIFY_SHA3_384:
			ret = verify_io_u_sha3_384(hdr, &vc);
			break;
		case VERIFY_SHA3_512:
			ret = verify_io_u_sha3_512(hdr, &vc);
			break;
		case VERIFY_XXHASH:
			ret = verify_io_u_xxhash(hdr, &vc);
			break;
		case VERIFY_SHA1:
			ret = verify_io_u_sha1(hdr, &vc);
			break;
		case VERIFY_PATTERN:
		case VERIFY_PATTERN_NO_HDR:
			ret = verify_io_u_pattern(hdr, &vc);
			break;
		default:
			log_err("Bad verify type %u\n", hdr->verify_type);
			ret = EINVAL;
		}

		if (ret && verify_type != hdr->verify_type && verify_type != VERIFY_PATTERN_NO_HDR)
			log_err("fio: verify type mismatch (%u media, %u given)\n",
					hdr->verify_type, verify_type);
	}

done:
	if (ret && td->o.verify_fatal)
		fio_mark_td_terminate(td);

	return ret;
}
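/*
 * Flow sketch for a hypothetical job (options shown for illustration
 * only): with --bs=8k --verify=crc32c --verify_interval=4k, each 8k read
 * is checked as two 4k regions; for each region the verify header is
 * validated first, then the crc32c handler checks the payload after it.
 */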
static void fill_xxhash(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_xxhash *vh = hdr_priv(hdr);
	void *state;

	state = XXH32_init(1);
	XXH32_update(state, p, len);
	vh->hash = XXH32_digest(state);
}
static void fill_sha3(struct fio_sha3_ctx *sha3_ctx, void *p, unsigned int len)
{
	fio_sha3_update(sha3_ctx, p, len);
	fio_sha3_final(sha3_ctx);
}

static void fill_sha3_224(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha3_224 *vh = hdr_priv(hdr);
	struct fio_sha3_ctx sha3_ctx = {
		.sha = vh->sha,
	};

	fio_sha3_224_init(&sha3_ctx);
	fill_sha3(&sha3_ctx, p, len);
}

static void fill_sha3_256(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha3_256 *vh = hdr_priv(hdr);
	struct fio_sha3_ctx sha3_ctx = {
		.sha = vh->sha,
	};

	fio_sha3_256_init(&sha3_ctx);
	fill_sha3(&sha3_ctx, p, len);
}

static void fill_sha3_384(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha3_384 *vh = hdr_priv(hdr);
	struct fio_sha3_ctx sha3_ctx = {
		.sha = vh->sha,
	};

	fio_sha3_384_init(&sha3_ctx);
	fill_sha3(&sha3_ctx, p, len);
}

static void fill_sha3_512(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha3_512 *vh = hdr_priv(hdr);
	struct fio_sha3_ctx sha3_ctx = {
		.sha = vh->sha,
	};

	fio_sha3_512_init(&sha3_ctx);
	fill_sha3(&sha3_ctx, p, len);
}
static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha512 *vh = hdr_priv(hdr);
	struct fio_sha512_ctx sha512_ctx = {
		.buf = vh->sha512,
	};

	fio_sha512_init(&sha512_ctx);
	fio_sha512_update(&sha512_ctx, p, len);
	fio_sha512_final(&sha512_ctx);
}

static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha256 *vh = hdr_priv(hdr);
	struct fio_sha256_ctx sha256_ctx = {
		.buf = vh->sha256,
	};

	fio_sha256_init(&sha256_ctx);
	fio_sha256_update(&sha256_ctx, p, len);
	fio_sha256_final(&sha256_ctx);
}

static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha1 *vh = hdr_priv(hdr);
	struct fio_sha1_ctx sha1_ctx = {
		.H = vh->sha1,
	};

	fio_sha1_init(&sha1_ctx);
	fio_sha1_update(&sha1_ctx, p, len);
	fio_sha1_final(&sha1_ctx);
}
static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc7 *vh = hdr_priv(hdr);

	vh->crc7 = fio_crc7(p, len);
}

static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc16 *vh = hdr_priv(hdr);

	vh->crc16 = fio_crc16(p, len);
}

static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32(p, len);
}

static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32c(p, len);
}

static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc64 *vh = hdr_priv(hdr);

	vh->crc64 = fio_crc64(p, len);
}
static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_md5 *vh = hdr_priv(hdr);
	struct fio_md5_ctx md5_ctx = {
		.hash = (uint32_t *) vh->md5_digest,
	};

	fio_md5_init(&md5_ctx);
	fio_md5_update(&md5_ctx, p, len);
	fio_md5_final(&md5_ctx);
}
static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
		       struct verify_header *hdr, unsigned int header_num,
		       unsigned int header_len, uint64_t rand_seed)
{
	void *p = hdr;

	hdr->magic = FIO_HDR_MAGIC;
	hdr->verify_type = td->o.verify;
	hdr->len = header_len;
	hdr->rand_seed = rand_seed;
	hdr->offset = io_u->verify_offset + header_num * td->o.verify_interval;
	hdr->time_sec = io_u->start_time.tv_sec;
	hdr->time_nsec = io_u->start_time.tv_nsec;
	hdr->thread = td->thread_number;
	hdr->numberio = io_u->numberio;
	hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
}
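/*
 * Note on the header checksum: fio_crc32c() runs over the first
 * offsetof(struct verify_header, crc32) bytes, i.e. every field laid out
 * before the crc32 member, so the crc seals those fields and must be the
 * last field written here.
 */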
static void fill_hdr(struct thread_data *td, struct io_u *io_u,
		     struct verify_header *hdr, unsigned int header_num,
		     unsigned int header_len, uint64_t rand_seed)
{
	if (td->o.verify != VERIFY_PATTERN_NO_HDR)
		__fill_hdr(td, io_u, hdr, header_num, header_len, rand_seed);
}
static void populate_hdr(struct thread_data *td, struct io_u *io_u,
			 struct verify_header *hdr, unsigned int header_num,
			 unsigned int header_len)
{
	unsigned int data_len;
	void *data;
	char *p;

	p = (char *) hdr;

	fill_hdr(td, io_u, hdr, header_num, header_len, io_u->rand_seed);

	if (header_len <= hdr_size(td, hdr)) {
		td_verror(td, EINVAL, "Blocksize too small");
		return;
	}
	data_len = header_len - hdr_size(td, hdr);

	data = p + hdr_size(td, hdr);
	switch (td->o.verify) {
	case VERIFY_MD5:
		dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_md5(hdr, data, data_len);
		break;
	case VERIFY_CRC64:
		dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc64(hdr, data, data_len);
		break;
	case VERIFY_CRC32C:
	case VERIFY_CRC32C_INTEL:
		dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32c(hdr, data, data_len);
		break;
	case VERIFY_CRC32:
		dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32(hdr, data, data_len);
		break;
	case VERIFY_CRC16:
		dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc16(hdr, data, data_len);
		break;
	case VERIFY_CRC7:
		dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc7(hdr, data, data_len);
		break;
	case VERIFY_SHA256:
		dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha256(hdr, data, data_len);
		break;
	case VERIFY_SHA512:
		dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha512(hdr, data, data_len);
		break;
	case VERIFY_SHA3_224:
		dprint(FD_VERIFY, "fill sha3-224 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha3_224(hdr, data, data_len);
		break;
	case VERIFY_SHA3_256:
		dprint(FD_VERIFY, "fill sha3-256 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha3_256(hdr, data, data_len);
		break;
	case VERIFY_SHA3_384:
		dprint(FD_VERIFY, "fill sha3-384 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha3_384(hdr, data, data_len);
		break;
	case VERIFY_SHA3_512:
		dprint(FD_VERIFY, "fill sha3-512 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha3_512(hdr, data, data_len);
		break;
	case VERIFY_XXHASH:
		dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
						io_u, hdr->len);
		fill_xxhash(hdr, data, data_len);
		break;
	case VERIFY_SHA1:
		dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha1(hdr, data, data_len);
		break;
	case VERIFY_HDR_ONLY:
	case VERIFY_PATTERN:
	case VERIFY_PATTERN_NO_HDR:
		/* nothing to do here */
		break;
	default:
		log_err("fio: bad verify type: %d\n", td->o.verify);
		assert(0);
	}

	if (td->o.verify_offset && hdr_size(td, hdr))
		memswp(p, p + td->o.verify_offset, hdr_size(td, hdr));
}
/*
 * fill body of io_u->buf with random data and add a header with the
 * checksum of choice
 */
void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->o.verify == VERIFY_NULL)
		return;

	io_u->numberio = td->io_issues[io_u->ddir];

	fill_pattern_headers(td, io_u, 0, 0);
}
int get_next_verify(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = NULL;

	/*
	 * this io_u is from a requeue, we already filled the offsets
	 */
	if (io_u->file)
		return 0;

	if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
		struct fio_rb_node *n = rb_first(&td->io_hist_tree);

		ipo = rb_entry(n, struct io_piece, rb_node);

		/*
		 * Ensure that the associated IO has completed
		 */
		if (atomic_load_acquire(&ipo->flags) & IP_F_IN_FLIGHT)
			goto nothing;

		rb_erase(n, &td->io_hist_tree);
		assert(ipo->flags & IP_F_ONRB);
		ipo->flags &= ~IP_F_ONRB;
	} else if (!flist_empty(&td->io_hist_list)) {
		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);

		/*
		 * Ensure that the associated IO has completed
		 */
		if (atomic_load_acquire(&ipo->flags) & IP_F_IN_FLIGHT)
			goto nothing;

		flist_del(&ipo->list);
		assert(ipo->flags & IP_F_ONLIST);
		ipo->flags &= ~IP_F_ONLIST;
	}

	if (ipo) {
		td->io_hist_len--;

		io_u->offset = ipo->offset;
		io_u->verify_offset = ipo->offset;
		io_u->buflen = ipo->len;
		io_u->numberio = ipo->numberio;
		io_u->file = ipo->file;
		io_u_set(td, io_u, IO_U_F_VER_LIST);

		if (ipo->flags & IP_F_TRIMMED)
			io_u_set(td, io_u, IO_U_F_TRIMMED);

		if (!fio_file_open(io_u->file)) {
			int r = td_io_open_file(td, io_u->file);

			if (r) {
				dprint(FD_VERIFY, "failed file %s open\n",
						io_u->file->file_name);
				return 1;
			}
		}

		get_file(ipo->file);
		assert(fio_file_open(io_u->file));
		io_u->ddir = DDIR_READ;
		io_u->xfer_buf = io_u->buf;
		io_u->xfer_buflen = io_u->buflen;

		remove_trim_entry(td, ipo);
		free(ipo);
		dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);

		if (!td->o.verify_pattern_bytes) {
			io_u->rand_seed = __rand(&td->verify_state);
			if (sizeof(int) != sizeof(long *))
				io_u->rand_seed *= __rand(&td->verify_state);
		}
		return 0;
	}

nothing:
	dprint(FD_VERIFY, "get_next_verify: empty\n");
	return 1;
}
void fio_verify_init(struct thread_data *td)
{
	if (td->o.verify == VERIFY_CRC32C_INTEL ||
	    td->o.verify == VERIFY_CRC32C) {
		crc32c_arm64_probe();
		crc32c_intel_probe();
	}
}
static void *verify_async_thread(void *data)
{
	struct thread_data *td = data;
	struct io_u *io_u;
	int ret = 0;

	if (fio_option_is_set(&td->o, verify_cpumask) &&
	    fio_setaffinity(td->pid, td->o.verify_cpumask)) {
		log_err("fio: failed setting verify thread affinity\n");
		goto done;
	}

	do {
		FLIST_HEAD(list);

		read_barrier();
		if (td->verify_thread_exit)
			break;

		pthread_mutex_lock(&td->io_u_lock);

		while (flist_empty(&td->verify_list) &&
		       !td->verify_thread_exit) {
			ret = pthread_cond_wait(&td->verify_cond,
							&td->io_u_lock);
			if (ret)
				break;
		}

		flist_splice_init(&td->verify_list, &list);
		pthread_mutex_unlock(&td->io_u_lock);

		if (flist_empty(&list))
			continue;

		while (!flist_empty(&list)) {
			io_u = flist_first_entry(&list, struct io_u, verify_list);
			flist_del_init(&io_u->verify_list);

			io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
			ret = verify_io_u(td, &io_u);

			put_io_u(td, io_u);
			if (!ret)
				continue;
			if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
				update_error_count(td, ret);
				ret = 0;
				continue;
			}
			break;
		}

		if (ret) {
			td_verror(td, ret, "async_verify");
			if (td->o.verify_fatal)
				fio_mark_td_terminate(td);
		}
	} while (!ret && !td->verify_thread_exit);

done:
	pthread_mutex_lock(&td->io_u_lock);
	td->nr_verify_threads--;
	pthread_cond_signal(&td->free_cond);
	pthread_mutex_unlock(&td->io_u_lock);

	return NULL;
}
int verify_async_init(struct thread_data *td)
{
	int i, ret;
	pthread_attr_t attr;

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 2 * PTHREAD_STACK_MIN);

	td->verify_thread_exit = 0;

	td->verify_threads = malloc(sizeof(pthread_t) * td->o.verify_async);
	for (i = 0; i < td->o.verify_async; i++) {
		ret = pthread_create(&td->verify_threads[i], &attr,
					verify_async_thread, td);
		if (ret) {
			log_err("fio: async verify creation failed: %s\n",
					strerror(ret));
			break;
		}
		ret = pthread_detach(td->verify_threads[i]);
		if (ret) {
			log_err("fio: async verify thread detach failed: %s\n",
					strerror(ret));
			break;
		}
		td->nr_verify_threads++;
	}

	pthread_attr_destroy(&attr);

	if (i != td->o.verify_async) {
		log_err("fio: only %d verify threads started, exiting\n", i);

		pthread_mutex_lock(&td->io_u_lock);
		td->verify_thread_exit = 1;
		pthread_cond_broadcast(&td->verify_cond);
		pthread_mutex_unlock(&td->io_u_lock);

		return 1;
	}

	return 0;
}
void verify_async_exit(struct thread_data *td)
{
	pthread_mutex_lock(&td->io_u_lock);
	td->verify_thread_exit = 1;
	pthread_cond_broadcast(&td->verify_cond);

	while (td->nr_verify_threads)
		pthread_cond_wait(&td->free_cond, &td->io_u_lock);

	pthread_mutex_unlock(&td->io_u_lock);
	free(td->verify_threads);
	td->verify_threads = NULL;
}
int paste_blockoff(char *buf, unsigned int len, void *priv)
{
	struct io_u *io = priv;
	unsigned long long off;

	typecheck(__typeof__(off), io->offset);
	off = cpu_to_le64((uint64_t)io->offset);
	len = min(len, (unsigned int)sizeof(off));
	memcpy(buf, &off, len);
	return 0;
}
static int __fill_file_completions(struct thread_data *td,
				   struct thread_io_list *s,
				   struct fio_file *f, unsigned int *index)
{
	unsigned int comps;
	int i, j;

	if (!f->last_write_comp)
		return 0;

	if (td->io_blocks[DDIR_WRITE] < td->last_write_comp_depth)
		comps = td->io_blocks[DDIR_WRITE];
	else
		comps = td->last_write_comp_depth;

	j = f->last_write_idx - 1;
	for (i = 0; i < comps; i++) {
		if (j == -1)
			j = td->last_write_comp_depth - 1;
		s->comps[*index].fileno = __cpu_to_le64(f->fileno);
		s->comps[*index].offset = cpu_to_le64(f->last_write_comp[j]);
		(*index)++;
		j--;
	}

	return comps;
}

static int fill_file_completions(struct thread_data *td,
				 struct thread_io_list *s, unsigned int *index)
{
	unsigned int comps = 0;
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i)
		comps += __fill_file_completions(td, s, f, index);

	return comps;
}
struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
{
	struct all_io_list *rep;
	size_t depth;
	void *next;
	int nr;

	compiletime_assert(sizeof(struct all_io_list) == 8, "all_io_list");

	/*
	 * Calculate reply space needed. We need one 'io_state' per thread,
	 * and the size will vary depending on depth.
	 */
	depth = 0;
	nr = 0;
	for_each_td(td) {
		if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
			continue;
		td->stop_io = 1;
		td->flags |= TD_F_VSTATE_SAVED;
		depth += (td->last_write_comp_depth * td->o.nr_files);
		nr++;
	} end_for_each();

	if (!nr)
		return NULL;

	*sz = sizeof(*rep);
	*sz += nr * sizeof(struct thread_io_list);
	*sz += depth * sizeof(struct file_comp);
	rep = calloc(1, *sz);

	rep->threads = cpu_to_le64((uint64_t) nr);

	next = &rep->state[0];
	for_each_td(td) {
		struct thread_io_list *s = next;
		unsigned int comps, index = 0;

		if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
			continue;

		comps = fill_file_completions(td, s, &index);

		s->no_comps = cpu_to_le64((uint64_t) comps);
		s->depth = cpu_to_le32((uint32_t) td->o.iodepth);
		s->max_no_comps_per_file = cpu_to_le32((uint32_t) td->last_write_comp_depth);
		s->nofiles = cpu_to_le32((uint32_t) td->o.nr_files);
		s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
		s->index = cpu_to_le64((uint64_t) __td_index);
		if (td->random_state.use64) {
			s->rand.state64.s[0] = cpu_to_le64(td->random_state.state64.s1);
			s->rand.state64.s[1] = cpu_to_le64(td->random_state.state64.s2);
			s->rand.state64.s[2] = cpu_to_le64(td->random_state.state64.s3);
			s->rand.state64.s[3] = cpu_to_le64(td->random_state.state64.s4);
			s->rand.state64.s[4] = cpu_to_le64(td->random_state.state64.s5);
			s->rand.state64.s[5] = 0;
			s->rand.use64 = cpu_to_le64((uint64_t)1);
		} else {
			s->rand.state32.s[0] = cpu_to_le32(td->random_state.state32.s1);
			s->rand.state32.s[1] = cpu_to_le32(td->random_state.state32.s2);
			s->rand.state32.s[2] = cpu_to_le32(td->random_state.state32.s3);
			s->rand.state32.s[3] = 0;
			s->rand.use64 = 0;
		}
		snprintf((char *) s->name, sizeof(s->name), "%s", td->o.name);
		next = io_list_next(s);
	} end_for_each();

	return rep;
}
static int open_state_file(const char *name, const char *prefix, int num,
			   int for_write)
{
	char out[PATH_MAX];
	int flags;
	int fd;

	if (for_write)
		flags = O_CREAT | O_TRUNC | O_WRONLY | O_SYNC;
	else
		flags = O_RDONLY;

	verify_state_gen_name(out, sizeof(out), name, prefix, num);

	fd = open(out, flags, 0644);
	if (fd == -1) {
		perror("fio: open state file");
		log_err("fio: state file: %s (for_write=%d)\n", out, for_write);
		return -1;
	}

	return fd;
}
static int write_thread_list_state(struct thread_io_list *s,
				   const char *prefix)
{
	struct verify_state_hdr hdr;
	uint64_t crc;
	ssize_t ret;
	int fd;

	fd = open_state_file((const char *) s->name, prefix, s->index, 1);
	if (fd == -1)
		return 1;

	crc = fio_crc32c((void *)s, thread_io_list_sz(s));

	hdr.version = cpu_to_le64((uint64_t) VSTATE_HDR_VERSION);
	hdr.size = cpu_to_le64((uint64_t) thread_io_list_sz(s));
	hdr.crc = cpu_to_le64(crc);
	ret = write(fd, &hdr, sizeof(hdr));
	if (ret != sizeof(hdr))
		goto write_fail;

	ret = write(fd, s, thread_io_list_sz(s));
	if (ret != thread_io_list_sz(s)) {
write_fail:
		if (ret < 0)
			perror("fio: write state file");
		log_err("fio: failed to write state file\n");
		ret = 1;
	} else
		ret = 0;

	close(fd);
	return ret;
}
void __verify_save_state(struct all_io_list *state, const char *prefix)
{
	struct thread_io_list *s = &state->state[0];
	unsigned int i;

	for (i = 0; i < le64_to_cpu(state->threads); i++) {
		write_thread_list_state(s, prefix);
		s = io_list_next(s);
	}
}

void verify_save_state(int mask)
{
	struct all_io_list *state;
	size_t sz;

	state = get_all_io_list(mask, &sz);
	if (state) {
		char prefix[PATH_MAX];

		if (aux_path)
			sprintf(prefix, "%s%clocal", aux_path, FIO_OS_PATH_SEPARATOR);
		else
			strcpy(prefix, "local");

		__verify_save_state(state, prefix);
		free(state);
	}
}
void verify_free_state(struct thread_data *td)
{
	if (td->vstate)
		free(td->vstate);
}
void verify_assign_state(struct thread_data *td, void *p)
{
	struct thread_io_list *s = p;
	int i;

	s->no_comps = le64_to_cpu(s->no_comps);
	s->depth = le32_to_cpu(s->depth);
	s->max_no_comps_per_file = le32_to_cpu(s->max_no_comps_per_file);
	s->nofiles = le32_to_cpu(s->nofiles);
	s->numberio = le64_to_cpu(s->numberio);
	s->rand.use64 = le64_to_cpu(s->rand.use64);

	if (s->rand.use64) {
		for (i = 0; i < 6; i++)
			s->rand.state64.s[i] = le64_to_cpu(s->rand.state64.s[i]);
	} else {
		for (i = 0; i < 4; i++)
			s->rand.state32.s[i] = le32_to_cpu(s->rand.state32.s[i]);
	}

	for (i = 0; i < s->no_comps; i++) {
		s->comps[i].fileno = le64_to_cpu(s->comps[i].fileno);
		s->comps[i].offset = le64_to_cpu(s->comps[i].offset);
	}

	td->vstate = p;
}
int verify_state_hdr(struct verify_state_hdr *hdr, struct thread_io_list *s)
{
	uint64_t crc;

	hdr->version = le64_to_cpu(hdr->version);
	hdr->size = le64_to_cpu(hdr->size);
	hdr->crc = le64_to_cpu(hdr->crc);

	if (hdr->version != VSTATE_HDR_VERSION)
		return 1;

	crc = fio_crc32c((void *)s, hdr->size);
	if (crc != hdr->crc)
		return 1;

	return 0;
}
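/*
 * On-disk layout of a verify state file, as written by
 * write_thread_list_state() above: a struct verify_state_hdr (version,
 * payload size, crc) followed by the thread_io_list payload; the crc is
 * a crc32c over that payload, and all fields are little-endian.
 */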
int verify_load_state(struct thread_data *td, const char *prefix)
{
	struct verify_state_hdr hdr;
	void *s = NULL;
	uint64_t crc;
	ssize_t ret;
	int fd;

	if (!td->o.verify_state)
		return 0;

	fd = open_state_file(td->o.name, prefix, td->thread_number - 1, 0);
	if (fd == -1)
		return 1;

	ret = read(fd, &hdr, sizeof(hdr));
	if (ret != sizeof(hdr)) {
		if (ret < 0)
			td_verror(td, errno, "read verify state hdr");
		log_err("fio: failed reading verify state header\n");
		goto err;
	}

	hdr.version = le64_to_cpu(hdr.version);
	hdr.size = le64_to_cpu(hdr.size);
	hdr.crc = le64_to_cpu(hdr.crc);

	if (hdr.version != VSTATE_HDR_VERSION) {
		log_err("fio: unsupported (%d) version in verify state header\n",
				(unsigned int) hdr.version);
		goto err;
	}

	s = malloc(hdr.size);
	ret = read(fd, s, hdr.size);
	if (ret != hdr.size) {
		if (ret < 0)
			td_verror(td, errno, "read verify state");
		log_err("fio: failed reading verify state\n");
		goto err;
	}

	crc = fio_crc32c(s, hdr.size);
	if (crc != hdr.crc) {
		log_err("fio: verify state is corrupt\n");
		goto err;
	}

	close(fd);

	verify_assign_state(td, s);
	return 0;
err:
	if (s)
		free(s);

	close(fd);
	return 1;
}
/*
 * Use the loaded verify state to know when to stop doing verification
 */
int verify_state_should_stop(struct thread_data *td, struct io_u *io_u)
{
	struct thread_io_list *s = td->vstate;
	struct fio_file *f = io_u->file;
	int i;

	if (!s || !f)
		return 0;

	/*
	 * If we're not yet inside the window of the last 'depth' issues
	 * (blocks numberio - depth .. numberio), keep verifying. If fewer
	 * blocks were issued than 'depth', always check. E.g. with
	 * numberio=1000 and depth=32, only roughly the last 32 reads need
	 * the completion check below.
	 */
	if ((td->io_blocks[DDIR_READ] < s->depth ||
	    s->numberio - td->io_blocks[DDIR_READ] > s->depth) &&
	    s->numberio > s->depth)
		return 0;

	/*
	 * We're in the window of having to check if this io was
	 * completed or not. If the IO was seen as completed, then
	 * lets verify it.
	 */
	for (i = 0; i < s->no_comps; i++) {
		if (s->comps[i].fileno != f->fileno)
			continue;
		if (io_u->verify_offset == s->comps[i].offset)
			return 0;
	}

	/*
	 * Not found, we have to stop
	 */
	log_info("Stop verify because offset %llu in %s is not recorded in verify state\n",
		 io_u->verify_offset, f->file_name);
	return 1;
}