[fio.git] / verify.c
1/*
2 * IO verification helpers
3 */
4#include <unistd.h>
5#include <fcntl.h>
6#include <string.h>
7#include <assert.h>
8#include <pthread.h>
9#include <libgen.h>
10
11#include "fio.h"
12#include "verify.h"
13#include "trim.h"
14#include "lib/rand.h"
15#include "lib/hweight.h"
16#include "lib/pattern.h"
17
18#include "crc/md5.h"
19#include "crc/crc64.h"
20#include "crc/crc32.h"
21#include "crc/crc32c.h"
22#include "crc/crc16.h"
23#include "crc/crc7.h"
24#include "crc/sha256.h"
25#include "crc/sha512.h"
26#include "crc/sha1.h"
27#include "crc/xxhash.h"
28
29static void populate_hdr(struct thread_data *td, struct io_u *io_u,
30 struct verify_header *hdr, unsigned int header_num,
31 unsigned int header_len);
32static void fill_hdr(struct thread_data *td, struct io_u *io_u,
33 struct verify_header *hdr, unsigned int header_num,
34 unsigned int header_len, uint64_t rand_seed);
35static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
36 struct verify_header *hdr, unsigned int header_num,
37 unsigned int header_len, uint64_t rand_seed);
38
39void fill_buffer_pattern(struct thread_data *td, void *p, unsigned int len)
40{
41 (void)cpy_pattern(td->o.buffer_pattern, td->o.buffer_pattern_bytes, p, len);
42}
43
44static void __fill_buffer(struct thread_options *o, unsigned long seed, void *p,
45 unsigned int len)
46{
47 __fill_random_buf_percentage(seed, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
48}
49
50static unsigned long fill_buffer(struct thread_data *td, void *p,
51 unsigned int len)
52{
53 struct frand_state *fs = &td->verify_state;
54 struct thread_options *o = &td->o;
55
56 return fill_random_buf_percentage(fs, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
57}
58
59void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
60 struct io_u *io_u, unsigned long seed, int use_seed)
61{
62 struct thread_options *o = &td->o;
63
64 if (!o->verify_pattern_bytes) {
65 dprint(FD_VERIFY, "fill random bytes len=%u\n", len);
66
67 if (use_seed)
68 __fill_buffer(o, seed, p, len);
69 else
70 io_u->rand_seed = fill_buffer(td, p, len);
71 return;
72 }
73
74 /* Skip refilling if the buffer was already filled and the pattern
75 * does not need to be patched with a format */
76 if (!td->o.verify_fmt_sz && io_u->buf_filled_len >= len) {
77 dprint(FD_VERIFY, "using already filled verify pattern b=%d len=%u\n",
78 o->verify_pattern_bytes, len);
79 return;
80 }
81
82 (void)paste_format(td->o.verify_pattern, td->o.verify_pattern_bytes,
83 td->o.verify_fmt, td->o.verify_fmt_sz,
84 p, len, io_u);
85 io_u->buf_filled_len = len;
86}
87
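/*
 * The header increment is the spacing between verify headers in the
 * buffer: verify_interval if it was set and fits within the buffer,
 * otherwise the whole buflen (i.e. a single header per io_u).
 */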
88static unsigned int get_hdr_inc(struct thread_data *td, struct io_u *io_u)
89{
90 unsigned int hdr_inc;
91
92 hdr_inc = io_u->buflen;
93 if (td->o.verify_interval && td->o.verify_interval <= io_u->buflen)
94 hdr_inc = td->o.verify_interval;
95
96 return hdr_inc;
97}
98
99static void fill_pattern_headers(struct thread_data *td, struct io_u *io_u,
100 unsigned long seed, int use_seed)
101{
102 unsigned int hdr_inc, header_num;
103 struct verify_header *hdr;
104 void *p = io_u->buf;
105
106 fill_verify_pattern(td, p, io_u->buflen, io_u, seed, use_seed);
107
108 hdr_inc = get_hdr_inc(td, io_u);
109 header_num = 0;
110 for (; p < io_u->buf + io_u->buflen; p += hdr_inc) {
111 hdr = p;
112 populate_hdr(td, io_u, hdr, header_num, hdr_inc);
113 header_num++;
114 }
115}
116
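/*
 * memswp() backs the verify_offset option: populate_hdr() and
 * verify_io_u() use it to swap the verify header with the data that
 * sits verify_offset bytes into the region, so the header does not
 * live at the start of each verified block. The 200 byte scratch
 * buffer (and the assert) bound how much can be swapped.
 */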
117static void memswp(void *buf1, void *buf2, unsigned int len)
118{
119 char swap[200];
120
121 assert(len <= sizeof(swap));
122
123 memcpy(&swap, buf1, len);
124 memcpy(buf1, buf2, len);
125 memcpy(buf2, &swap, len);
126}
127
128static void hexdump(void *buffer, int len)
129{
130 unsigned char *p = buffer;
131 int i;
132
133 for (i = 0; i < len; i++)
134 log_err("%02x", p[i]);
135 log_err("\n");
136}
137
138/*
139 * Prepare for separation of verify_header and checksum header
140 */
141static inline unsigned int __hdr_size(int verify_type)
142{
143 unsigned int len = 0;
144
145 switch (verify_type) {
146 case VERIFY_NONE:
147 case VERIFY_HDR_ONLY:
148 case VERIFY_NULL:
149 case VERIFY_PATTERN:
150 len = 0;
151 break;
152 case VERIFY_MD5:
153 len = sizeof(struct vhdr_md5);
154 break;
155 case VERIFY_CRC64:
156 len = sizeof(struct vhdr_crc64);
157 break;
158 case VERIFY_CRC32C:
159 case VERIFY_CRC32:
160 case VERIFY_CRC32C_INTEL:
161 len = sizeof(struct vhdr_crc32);
162 break;
163 case VERIFY_CRC16:
164 len = sizeof(struct vhdr_crc16);
165 break;
166 case VERIFY_CRC7:
167 len = sizeof(struct vhdr_crc7);
168 break;
169 case VERIFY_SHA256:
170 len = sizeof(struct vhdr_sha256);
171 break;
172 case VERIFY_SHA512:
173 len = sizeof(struct vhdr_sha512);
174 break;
175 case VERIFY_XXHASH:
176 len = sizeof(struct vhdr_xxhash);
177 break;
178 case VERIFY_SHA1:
179 len = sizeof(struct vhdr_sha1);
180 break;
181 case VERIFY_PATTERN_NO_HDR:
182 return 0;
183 default:
184 log_err("fio: unknown verify header!\n");
185 assert(0);
186 }
187
188 return len + sizeof(struct verify_header);
189}
190
191static inline unsigned int hdr_size(struct thread_data *td,
192 struct verify_header *hdr)
193{
194 if (td->o.verify == VERIFY_PATTERN_NO_HDR)
195 return 0;
196
197 return __hdr_size(hdr->verify_type);
198}
199
200static void *hdr_priv(struct verify_header *hdr)
201{
202 void *priv = hdr;
203
204 return priv + sizeof(struct verify_header);
205}
206
207/*
208 * Verify container, pass info to verify handlers and allow them to
209 * pass info back in case of error
210 */
211struct vcont {
212 /*
213 * Input
214 */
215 struct io_u *io_u;
216 unsigned int hdr_num;
217 struct thread_data *td;
218
219 /*
220 * Output, only valid in case of error
221 */
222 const char *name;
223 void *good_crc;
224 void *bad_crc;
225 unsigned int crc_len;
226};
227
228#define DUMP_BUF_SZ 255
229static int dump_buf_warned;
230
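/*
 * Dump a failing block to a file named <basename>.<offset>.<type>
 * (created under aux_path if one is set), where <type> is "received",
 * "expected" or "hdr_fail", so the bad and regenerated contents can be
 * inspected offline.
 */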
231static void dump_buf(char *buf, unsigned int len, unsigned long long offset,
232 const char *type, struct fio_file *f)
233{
234 char *ptr, fname[DUMP_BUF_SZ];
235 size_t buf_left = DUMP_BUF_SZ;
236 int ret, fd;
237
238 ptr = strdup(f->file_name);
239
240 memset(fname, 0, sizeof(fname));
241 if (aux_path)
242 sprintf(fname, "%s%s", aux_path, FIO_OS_PATH_SEPARATOR);
243
244 strncpy(fname + strlen(fname), basename(ptr), buf_left - 1);
245
246 buf_left -= strlen(fname);
247 if (buf_left <= 0) {
248 if (!dump_buf_warned) {
249 log_err("fio: verify failure dump buffer too small\n");
250 dump_buf_warned = 1;
251 }
252 free(ptr);
253 return;
254 }
255
256 snprintf(fname + strlen(fname), buf_left, ".%llu.%s", offset, type);
257
258 fd = open(fname, O_CREAT | O_TRUNC | O_WRONLY, 0644);
259 if (fd < 0) {
260 perror("open verify buf file");
261 return;
262 }
263
264 while (len) {
265 ret = write(fd, buf, len);
266 if (!ret)
267 break;
268 else if (ret < 0) {
269 perror("write verify buf file");
270 break;
271 }
272 len -= ret;
273 buf += ret;
274 }
275
276 close(fd);
277 log_err(" %s data dumped as %s\n", type, fname);
278 free(ptr);
279}
280
281/*
282 * Dump the contents of the read block and re-generate the correct data
283 * and dump that too.
284 */
285static void __dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
286{
287 struct thread_data *td = vc->td;
288 struct io_u *io_u = vc->io_u;
289 unsigned long hdr_offset;
290 struct io_u dummy;
291 void *buf;
292
293 if (!td->o.verify_dump)
294 return;
295
296 /*
297 * Dump the contents we just read off disk
298 */
299 hdr_offset = vc->hdr_num * hdr->len;
300
301 dump_buf(io_u->buf + hdr_offset, hdr->len, io_u->offset + hdr_offset,
302 "received", vc->io_u->file);
303
304 /*
305 * Allocate a new buf and re-generate the original data
306 */
307 buf = malloc(io_u->buflen);
308 dummy = *io_u;
309 dummy.buf = buf;
310 dummy.rand_seed = hdr->rand_seed;
311 dummy.buf_filled_len = 0;
312 dummy.buflen = io_u->buflen;
313
314 fill_pattern_headers(td, &dummy, hdr->rand_seed, 1);
315
316 dump_buf(buf + hdr_offset, hdr->len, io_u->offset + hdr_offset,
317 "expected", vc->io_u->file);
318 free(buf);
319}
320
321static void dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
322{
323 struct thread_data *td = vc->td;
324 struct verify_header shdr;
325
326 if (td->o.verify == VERIFY_PATTERN_NO_HDR) {
327 __fill_hdr(td, vc->io_u, &shdr, 0, vc->io_u->buflen, 0);
328 hdr = &shdr;
329 }
330
331 __dump_verify_buffers(hdr, vc);
332}
333
334static void log_verify_failure(struct verify_header *hdr, struct vcont *vc)
335{
336 unsigned long long offset;
337
338 offset = vc->io_u->offset;
339 offset += vc->hdr_num * hdr->len;
340 log_err("%.8s: verify failed at file %s offset %llu, length %u\n",
341 vc->name, vc->io_u->file->file_name, offset, hdr->len);
342
343 if (vc->good_crc && vc->bad_crc) {
344 log_err(" Expected CRC: ");
345 hexdump(vc->good_crc, vc->crc_len);
346 log_err(" Received CRC: ");
347 hexdump(vc->bad_crc, vc->crc_len);
348 }
349
350 dump_verify_buffers(hdr, vc);
351}
352
353/*
354 * Return data area 'header_num'
355 */
356static inline void *io_u_verify_off(struct verify_header *hdr, struct vcont *vc)
357{
358 return vc->io_u->buf + vc->hdr_num * hdr->len + hdr_size(vc->td, hdr);
359}
360
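/*
 * Pattern verification: the expected byte at any position in the io_u
 * buffer is pattern[position % pattern_size], so 'mod' below is seeded
 * with the offset of this header's data area within the buffer and
 * wraps at pattern_size while scanning.
 */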
361static int verify_io_u_pattern(struct verify_header *hdr, struct vcont *vc)
362{
363 struct thread_data *td = vc->td;
364 struct io_u *io_u = vc->io_u;
365 char *buf, *pattern;
366 unsigned int header_size = __hdr_size(td->o.verify);
367 unsigned int len, mod, i, pattern_size;
368 int rc;
369
370 pattern = td->o.verify_pattern;
371 pattern_size = td->o.verify_pattern_bytes;
372 assert(pattern_size != 0);
373
374 (void)paste_format_inplace(pattern, pattern_size,
375 td->o.verify_fmt, td->o.verify_fmt_sz, io_u);
376
377 buf = (void *) hdr + header_size;
378 len = get_hdr_inc(td, io_u) - header_size;
379 mod = (get_hdr_inc(td, io_u) * vc->hdr_num + header_size) % pattern_size;
380
381 rc = cmp_pattern(pattern, pattern_size, mod, buf, len);
382 if (!rc)
383 return 0;
384
385 /* Slow path, compare each byte */
386 for (i = 0; i < len; i++) {
387 if (buf[i] != pattern[mod]) {
388 unsigned int bits;
389
390 bits = hweight8(buf[i] ^ pattern[mod]);
391 log_err("fio: got pattern '%02x', wanted '%02x'. Bad bits %d\n",
392 (unsigned char)buf[i],
393 (unsigned char)pattern[mod],
394 bits);
395 log_err("fio: bad pattern block offset %u\n", i);
396 vc->name = "pattern";
397 log_verify_failure(hdr, vc);
398 return EILSEQ;
399 }
400 mod++;
401 if (mod == td->o.verify_pattern_bytes)
402 mod = 0;
403 }
404
405 /* Unreachable line */
406 assert(0);
407 return EILSEQ;
408}
409
410static int verify_io_u_xxhash(struct verify_header *hdr, struct vcont *vc)
411{
412 void *p = io_u_verify_off(hdr, vc);
413 struct vhdr_xxhash *vh = hdr_priv(hdr);
414 uint32_t hash;
415 void *state;
416
417 dprint(FD_VERIFY, "xxhash verify io_u %p, len %u\n", vc->io_u, hdr->len);
418
419 state = XXH32_init(1);
420 XXH32_update(state, p, hdr->len - hdr_size(vc->td, hdr));
421 hash = XXH32_digest(state);
422
423 if (vh->hash == hash)
424 return 0;
425
426 vc->name = "xxhash";
427 vc->good_crc = &vh->hash;
428 vc->bad_crc = &hash;
429 vc->crc_len = sizeof(hash);
430 log_verify_failure(hdr, vc);
431 return EILSEQ;
432}
433
434static int verify_io_u_sha512(struct verify_header *hdr, struct vcont *vc)
435{
436 void *p = io_u_verify_off(hdr, vc);
437 struct vhdr_sha512 *vh = hdr_priv(hdr);
438 uint8_t sha512[128];
439 struct fio_sha512_ctx sha512_ctx = {
440 .buf = sha512,
441 };
442
443 dprint(FD_VERIFY, "sha512 verify io_u %p, len %u\n", vc->io_u, hdr->len);
444
445 fio_sha512_init(&sha512_ctx);
446 fio_sha512_update(&sha512_ctx, p, hdr->len - hdr_size(vc->td, hdr));
447
448 if (!memcmp(vh->sha512, sha512_ctx.buf, sizeof(sha512)))
449 return 0;
450
451 vc->name = "sha512";
452 vc->good_crc = vh->sha512;
453 vc->bad_crc = sha512_ctx.buf;
454 vc->crc_len = sizeof(vh->sha512);
455 log_verify_failure(hdr, vc);
456 return EILSEQ;
457}
458
459static int verify_io_u_sha256(struct verify_header *hdr, struct vcont *vc)
460{
461 void *p = io_u_verify_off(hdr, vc);
462 struct vhdr_sha256 *vh = hdr_priv(hdr);
463 uint8_t sha256[64];
464 struct fio_sha256_ctx sha256_ctx = {
465 .buf = sha256,
466 };
467
468 dprint(FD_VERIFY, "sha256 verify io_u %p, len %u\n", vc->io_u, hdr->len);
469
470 fio_sha256_init(&sha256_ctx);
471 fio_sha256_update(&sha256_ctx, p, hdr->len - hdr_size(vc->td, hdr));
472 fio_sha256_final(&sha256_ctx);
473
474 if (!memcmp(vh->sha256, sha256_ctx.buf, sizeof(sha256)))
475 return 0;
476
477 vc->name = "sha256";
478 vc->good_crc = vh->sha256;
479 vc->bad_crc = sha256_ctx.buf;
480 vc->crc_len = sizeof(vh->sha256);
481 log_verify_failure(hdr, vc);
482 return EILSEQ;
483}
484
485static int verify_io_u_sha1(struct verify_header *hdr, struct vcont *vc)
486{
487 void *p = io_u_verify_off(hdr, vc);
488 struct vhdr_sha1 *vh = hdr_priv(hdr);
489 uint32_t sha1[5];
490 struct fio_sha1_ctx sha1_ctx = {
491 .H = sha1,
492 };
493
494 dprint(FD_VERIFY, "sha1 verify io_u %p, len %u\n", vc->io_u, hdr->len);
495
496 fio_sha1_init(&sha1_ctx);
497 fio_sha1_update(&sha1_ctx, p, hdr->len - hdr_size(vc->td, hdr));
498 fio_sha1_final(&sha1_ctx);
499
500 if (!memcmp(vh->sha1, sha1_ctx.H, sizeof(sha1)))
501 return 0;
502
503 vc->name = "sha1";
504 vc->good_crc = vh->sha1;
505 vc->bad_crc = sha1_ctx.H;
506 vc->crc_len = sizeof(vh->sha1);
507 log_verify_failure(hdr, vc);
508 return EILSEQ;
509}
510
511static int verify_io_u_crc7(struct verify_header *hdr, struct vcont *vc)
512{
513 void *p = io_u_verify_off(hdr, vc);
514 struct vhdr_crc7 *vh = hdr_priv(hdr);
515 unsigned char c;
516
517 dprint(FD_VERIFY, "crc7 verify io_u %p, len %u\n", vc->io_u, hdr->len);
518
519 c = fio_crc7(p, hdr->len - hdr_size(vc->td, hdr));
520
521 if (c == vh->crc7)
522 return 0;
523
524 vc->name = "crc7";
525 vc->good_crc = &vh->crc7;
526 vc->bad_crc = &c;
527 vc->crc_len = 1;
528 log_verify_failure(hdr, vc);
529 return EILSEQ;
530}
531
532static int verify_io_u_crc16(struct verify_header *hdr, struct vcont *vc)
533{
534 void *p = io_u_verify_off(hdr, vc);
535 struct vhdr_crc16 *vh = hdr_priv(hdr);
536 unsigned short c;
537
538 dprint(FD_VERIFY, "crc16 verify io_u %p, len %u\n", vc->io_u, hdr->len);
539
540 c = fio_crc16(p, hdr->len - hdr_size(vc->td, hdr));
541
542 if (c == vh->crc16)
543 return 0;
544
545 vc->name = "crc16";
546 vc->good_crc = &vh->crc16;
547 vc->bad_crc = &c;
548 vc->crc_len = 2;
549 log_verify_failure(hdr, vc);
550 return EILSEQ;
551}
552
553static int verify_io_u_crc64(struct verify_header *hdr, struct vcont *vc)
554{
555 void *p = io_u_verify_off(hdr, vc);
556 struct vhdr_crc64 *vh = hdr_priv(hdr);
557 unsigned long long c;
558
559 dprint(FD_VERIFY, "crc64 verify io_u %p, len %u\n", vc->io_u, hdr->len);
560
561 c = fio_crc64(p, hdr->len - hdr_size(vc->td, hdr));
562
563 if (c == vh->crc64)
564 return 0;
565
566 vc->name = "crc64";
567 vc->good_crc = &vh->crc64;
568 vc->bad_crc = &c;
569 vc->crc_len = 8;
570 log_verify_failure(hdr, vc);
571 return EILSEQ;
572}
573
574static int verify_io_u_crc32(struct verify_header *hdr, struct vcont *vc)
575{
576 void *p = io_u_verify_off(hdr, vc);
577 struct vhdr_crc32 *vh = hdr_priv(hdr);
578 uint32_t c;
579
580 dprint(FD_VERIFY, "crc32 verify io_u %p, len %u\n", vc->io_u, hdr->len);
581
582 c = fio_crc32(p, hdr->len - hdr_size(vc->td, hdr));
583
584 if (c == vh->crc32)
585 return 0;
586
587 vc->name = "crc32";
588 vc->good_crc = &vh->crc32;
589 vc->bad_crc = &c;
590 vc->crc_len = 4;
591 log_verify_failure(hdr, vc);
592 return EILSEQ;
593}
594
595static int verify_io_u_crc32c(struct verify_header *hdr, struct vcont *vc)
596{
597 void *p = io_u_verify_off(hdr, vc);
598 struct vhdr_crc32 *vh = hdr_priv(hdr);
599 uint32_t c;
600
601 dprint(FD_VERIFY, "crc32c verify io_u %p, len %u\n", vc->io_u, hdr->len);
602
603 c = fio_crc32c(p, hdr->len - hdr_size(vc->td, hdr));
604
605 if (c == vh->crc32)
606 return 0;
607
608 vc->name = "crc32c";
609 vc->good_crc = &vh->crc32;
610 vc->bad_crc = &c;
611 vc->crc_len = 4;
612 log_verify_failure(hdr, vc);
613 return EILSEQ;
614}
615
616static int verify_io_u_md5(struct verify_header *hdr, struct vcont *vc)
617{
618 void *p = io_u_verify_off(hdr, vc);
619 struct vhdr_md5 *vh = hdr_priv(hdr);
620 uint32_t hash[MD5_HASH_WORDS];
621 struct fio_md5_ctx md5_ctx = {
622 .hash = hash,
623 };
624
625 dprint(FD_VERIFY, "md5 verify io_u %p, len %u\n", vc->io_u, hdr->len);
626
627 fio_md5_init(&md5_ctx);
628 fio_md5_update(&md5_ctx, p, hdr->len - hdr_size(vc->td, hdr));
629 fio_md5_final(&md5_ctx);
630
631 if (!memcmp(vh->md5_digest, md5_ctx.hash, sizeof(hash)))
632 return 0;
633
634 vc->name = "md5";
635 vc->good_crc = vh->md5_digest;
636 vc->bad_crc = md5_ctx.hash;
637 vc->crc_len = sizeof(hash);
638 log_verify_failure(hdr, vc);
639 return EILSEQ;
640}
641
642/*
643 * Push IO verification to a separate thread
644 */
645int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
646{
647 struct io_u *io_u = *io_u_ptr;
648
649 pthread_mutex_lock(&td->io_u_lock);
650
651 if (io_u->file)
652 put_file_log(td, io_u->file);
653
654 if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
655 td->cur_depth--;
656 io_u_clear(td, io_u, IO_U_F_IN_CUR_DEPTH);
657 }
658 flist_add_tail(&io_u->verify_list, &td->verify_list);
659 *io_u_ptr = NULL;
660 pthread_mutex_unlock(&td->io_u_lock);
661
662 pthread_cond_signal(&td->verify_cond);
663 return 0;
664}
665
666/*
667 * Thanks Rusty, for spending the time so I don't have to.
668 *
669 * http://rusty.ozlabs.org/?p=560
670 */
671static int mem_is_zero(const void *data, size_t length)
672{
673 const unsigned char *p = data;
674 size_t len;
675
676 /* Check first 16 bytes manually */
677 for (len = 0; len < 16; len++) {
678 if (!length)
679 return 1;
680 if (*p)
681 return 0;
682 p++;
683 length--;
684 }
685
686 /* Now we know the first 16 bytes are zero, memcmp with self. */
687 return memcmp(data, p, length) == 0;
688}
689
690static int mem_is_zero_slow(const void *data, size_t length, size_t *offset)
691{
692 const unsigned char *p = data;
693
694 *offset = 0;
695 while (length) {
696 if (*p)
697 break;
698 (*offset)++;
699 length--;
700 p++;
701 }
702
703 return !length;
704}
705
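/*
 * With trim_zero set, a block that was trimmed must read back as all
 * zeroes; if it does not, report the offset of the first non-zero byte.
 */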
706static int verify_trimmed_io_u(struct thread_data *td, struct io_u *io_u)
707{
708 size_t offset;
709
710 if (!td->o.trim_zero)
711 return 0;
712
713 if (mem_is_zero(io_u->buf, io_u->buflen))
714 return 0;
715
716 mem_is_zero_slow(io_u->buf, io_u->buflen, &offset);
717
718 log_err("trim: verify failed at file %s offset %llu, length %lu"
719 ", block offset %lu\n",
720 io_u->file->file_name, io_u->offset, io_u->buflen,
721 (unsigned long) offset);
722 return EILSEQ;
723}
724
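/*
 * Sanity check the stored verify_header itself (magic, length, seed,
 * offset and, where applicable, numberio) plus its own crc32c before
 * the payload checksum is examined. Returns 0 on success and EILSEQ on
 * a corrupted header, optionally dumping the bad header to disk.
 */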
725static int verify_header(struct io_u *io_u, struct thread_data *td,
726 struct verify_header *hdr, unsigned int hdr_num,
727 unsigned int hdr_len)
728{
729 void *p = hdr;
730 uint32_t crc;
731
732 if (hdr->magic != FIO_HDR_MAGIC) {
733 log_err("verify: bad magic header %x, wanted %x",
734 hdr->magic, FIO_HDR_MAGIC);
735 goto err;
736 }
737 if (hdr->len != hdr_len) {
738 log_err("verify: bad header length %u, wanted %u",
739 hdr->len, hdr_len);
740 goto err;
741 }
742 if (hdr->rand_seed != io_u->rand_seed) {
743 log_err("verify: bad header rand_seed %"PRIu64
744 ", wanted %"PRIu64,
745 hdr->rand_seed, io_u->rand_seed);
746 goto err;
747 }
748 if (hdr->offset != io_u->offset + hdr_num * td->o.verify_interval) {
749 log_err("verify: bad header offset %"PRIu64
750 ", wanted %llu",
751 hdr->offset, io_u->offset);
752 goto err;
753 }
754
755 /*
756 * For read-only workloads, the program cannot be certain of the
757 * last numberio written to a block. Checking of numberio will be
758 * done only for workloads that write data. For verify_only,
759 * numberio will be checked in the last iteration when the correct
760 * state of numberio, that would have been written to each block
761 * in a previous run of fio, has been reached.
762 */
763 if ((td_write(td) || td_rw(td)) && (td_min_bs(td) == td_max_bs(td)) &&
764 !td->o.time_based)
765 if (!td->o.verify_only || td->o.loops == 0)
766 if (hdr->numberio != io_u->numberio) {
767 log_err("verify: bad header numberio %"PRIu16
768 ", wanted %"PRIu16,
769 hdr->numberio, io_u->numberio);
770 goto err;
771 }
772
773 crc = fio_crc32c(p, offsetof(struct verify_header, crc32));
774 if (crc != hdr->crc32) {
775 log_err("verify: bad header crc %x, calculated %x",
776 hdr->crc32, crc);
777 goto err;
778 }
779 return 0;
780
781err:
782 log_err(" at file %s offset %llu, length %u\n",
783 io_u->file->file_name,
784 io_u->offset + hdr_num * hdr_len, hdr_len);
785
786 if (td->o.verify_dump)
787 dump_buf(p, hdr_len, io_u->offset + hdr_num * hdr_len,
788 "hdr_fail", io_u->file);
789
790 return EILSEQ;
791}
792
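/*
 * Main verify entry point for a completed read: walk the buffer in
 * hdr_inc sized steps, validate each verify_header and then dispatch to
 * the checksum or pattern verifier selected by the verify option (or,
 * for VERIFY_NONE, by the type recorded in the header itself).
 */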
793int verify_io_u(struct thread_data *td, struct io_u **io_u_ptr)
794{
795 struct verify_header *hdr;
796 struct io_u *io_u = *io_u_ptr;
797 unsigned int header_size, hdr_inc, hdr_num = 0;
798 void *p;
799 int ret;
800
801 if (td->o.verify == VERIFY_NULL || io_u->ddir != DDIR_READ)
802 return 0;
803 /*
804 * If the IO engine is faking IO (like null), then just pretend
805 * we verified everything.
806 */
807 if (td_ioengine_flagged(td, FIO_FAKEIO))
808 return 0;
809
810 if (io_u->flags & IO_U_F_TRIMMED) {
811 ret = verify_trimmed_io_u(td, io_u);
812 goto done;
813 }
814
815 hdr_inc = get_hdr_inc(td, io_u);
816
817 ret = 0;
818 for (p = io_u->buf; p < io_u->buf + io_u->buflen;
819 p += hdr_inc, hdr_num++) {
820 struct vcont vc = {
821 .io_u = io_u,
822 .hdr_num = hdr_num,
823 .td = td,
824 };
825 unsigned int verify_type;
826
827 if (ret && td->o.verify_fatal)
828 break;
829
830 header_size = __hdr_size(td->o.verify);
831 if (td->o.verify_offset)
832 memswp(p, p + td->o.verify_offset, header_size);
833 hdr = p;
834
835 /*
836 * Make the rand_seed check pass when verifysort or
837 * verify_backlog is enabled.
838 */
839 if (td->o.verifysort || (td->flags & TD_F_VER_BACKLOG))
840 io_u->rand_seed = hdr->rand_seed;
841
842 if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
843 ret = verify_header(io_u, td, hdr, hdr_num, hdr_inc);
844 if (ret)
845 return ret;
846 }
847
848 if (td->o.verify != VERIFY_NONE)
849 verify_type = td->o.verify;
850 else
851 verify_type = hdr->verify_type;
852
853 switch (verify_type) {
854 case VERIFY_HDR_ONLY:
855 /* Header is always verified, check if pattern is left
856 * for verification. */
857 if (td->o.verify_pattern_bytes)
858 ret = verify_io_u_pattern(hdr, &vc);
859 break;
860 case VERIFY_MD5:
861 ret = verify_io_u_md5(hdr, &vc);
862 break;
863 case VERIFY_CRC64:
864 ret = verify_io_u_crc64(hdr, &vc);
865 break;
866 case VERIFY_CRC32C:
867 case VERIFY_CRC32C_INTEL:
868 ret = verify_io_u_crc32c(hdr, &vc);
869 break;
870 case VERIFY_CRC32:
871 ret = verify_io_u_crc32(hdr, &vc);
872 break;
873 case VERIFY_CRC16:
874 ret = verify_io_u_crc16(hdr, &vc);
875 break;
876 case VERIFY_CRC7:
877 ret = verify_io_u_crc7(hdr, &vc);
878 break;
879 case VERIFY_SHA256:
880 ret = verify_io_u_sha256(hdr, &vc);
881 break;
882 case VERIFY_SHA512:
883 ret = verify_io_u_sha512(hdr, &vc);
884 break;
885 case VERIFY_XXHASH:
886 ret = verify_io_u_xxhash(hdr, &vc);
887 break;
888 case VERIFY_SHA1:
889 ret = verify_io_u_sha1(hdr, &vc);
890 break;
891 case VERIFY_PATTERN:
892 case VERIFY_PATTERN_NO_HDR:
893 ret = verify_io_u_pattern(hdr, &vc);
894 break;
895 default:
896 log_err("Bad verify type %u\n", hdr->verify_type);
897 ret = EINVAL;
898 }
899
900 if (ret && verify_type != hdr->verify_type)
901 log_err("fio: verify type mismatch (%u media, %u given)\n",
902 hdr->verify_type, verify_type);
903 }
904
905done:
906 if (ret && td->o.verify_fatal)
907 fio_mark_td_terminate(td);
908
909 return ret;
910}
911
912static void fill_xxhash(struct verify_header *hdr, void *p, unsigned int len)
913{
914 struct vhdr_xxhash *vh = hdr_priv(hdr);
915 void *state;
916
917 state = XXH32_init(1);
918 XXH32_update(state, p, len);
919 vh->hash = XXH32_digest(state);
920}
921
922static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
923{
924 struct vhdr_sha512 *vh = hdr_priv(hdr);
925 struct fio_sha512_ctx sha512_ctx = {
926 .buf = vh->sha512,
927 };
928
929 fio_sha512_init(&sha512_ctx);
930 fio_sha512_update(&sha512_ctx, p, len);
931}
932
933static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
934{
935 struct vhdr_sha256 *vh = hdr_priv(hdr);
936 struct fio_sha256_ctx sha256_ctx = {
937 .buf = vh->sha256,
938 };
939
940 fio_sha256_init(&sha256_ctx);
941 fio_sha256_update(&sha256_ctx, p, len);
942 fio_sha256_final(&sha256_ctx);
943}
944
945static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
946{
947 struct vhdr_sha1 *vh = hdr_priv(hdr);
948 struct fio_sha1_ctx sha1_ctx = {
949 .H = vh->sha1,
950 };
951
952 fio_sha1_init(&sha1_ctx);
953 fio_sha1_update(&sha1_ctx, p, len);
954 fio_sha1_final(&sha1_ctx);
955}
956
957static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
958{
959 struct vhdr_crc7 *vh = hdr_priv(hdr);
960
961 vh->crc7 = fio_crc7(p, len);
962}
963
964static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
965{
966 struct vhdr_crc16 *vh = hdr_priv(hdr);
967
968 vh->crc16 = fio_crc16(p, len);
969}
970
971static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
972{
973 struct vhdr_crc32 *vh = hdr_priv(hdr);
974
975 vh->crc32 = fio_crc32(p, len);
976}
977
978static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
979{
980 struct vhdr_crc32 *vh = hdr_priv(hdr);
981
982 vh->crc32 = fio_crc32c(p, len);
983}
984
985static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
986{
987 struct vhdr_crc64 *vh = hdr_priv(hdr);
988
989 vh->crc64 = fio_crc64(p, len);
990}
991
992static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
993{
994 struct vhdr_md5 *vh = hdr_priv(hdr);
995 struct fio_md5_ctx md5_ctx = {
996 .hash = (uint32_t *) vh->md5_digest,
997 };
998
999 fio_md5_init(&md5_ctx);
1000 fio_md5_update(&md5_ctx, p, len);
1001 fio_md5_final(&md5_ctx);
1002}
1003
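/*
 * hdr->crc32 is a crc32c over the header fields that precede the crc32
 * member (offsetof(struct verify_header, crc32) bytes); verify_header()
 * recomputes it the same way on the read side.
 */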
1004static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
1005 struct verify_header *hdr, unsigned int header_num,
1006 unsigned int header_len, uint64_t rand_seed)
1007{
1008 void *p = hdr;
1009
1010 hdr->magic = FIO_HDR_MAGIC;
1011 hdr->verify_type = td->o.verify;
1012 hdr->len = header_len;
1013 hdr->rand_seed = rand_seed;
1014 hdr->offset = io_u->offset + header_num * td->o.verify_interval;
1015 hdr->time_sec = io_u->start_time.tv_sec;
1016 hdr->time_usec = io_u->start_time.tv_usec;
1017 hdr->thread = td->thread_number;
1018 hdr->numberio = io_u->numberio;
1019 hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
1020}
1021
1022
1023static void fill_hdr(struct thread_data *td, struct io_u *io_u,
1024 struct verify_header *hdr, unsigned int header_num,
1025 unsigned int header_len, uint64_t rand_seed)
1026{
1027
1028 if (td->o.verify != VERIFY_PATTERN_NO_HDR)
1029 __fill_hdr(td, io_u, hdr, header_num, header_len, rand_seed);
1030}
1031
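/*
 * Fill in the verify_header for one region and checksum the data that
 * follows it according to the selected verify type. If verify_offset is
 * set, the finished header is then swapped away from the start of the
 * region.
 */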
1032static void populate_hdr(struct thread_data *td, struct io_u *io_u,
1033 struct verify_header *hdr, unsigned int header_num,
1034 unsigned int header_len)
1035{
1036 unsigned int data_len;
1037 void *data, *p;
1038
1039 p = (void *) hdr;
1040
1041 fill_hdr(td, io_u, hdr, header_num, header_len, io_u->rand_seed);
1042
1043 data_len = header_len - hdr_size(td, hdr);
1044
1045 data = p + hdr_size(td, hdr);
1046 switch (td->o.verify) {
1047 case VERIFY_MD5:
1048 dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
1049 io_u, hdr->len);
1050 fill_md5(hdr, data, data_len);
1051 break;
1052 case VERIFY_CRC64:
1053 dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
1054 io_u, hdr->len);
1055 fill_crc64(hdr, data, data_len);
1056 break;
1057 case VERIFY_CRC32C:
1058 case VERIFY_CRC32C_INTEL:
1059 dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
1060 io_u, hdr->len);
1061 fill_crc32c(hdr, data, data_len);
1062 break;
1063 case VERIFY_CRC32:
1064 dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
1065 io_u, hdr->len);
1066 fill_crc32(hdr, data, data_len);
1067 break;
1068 case VERIFY_CRC16:
1069 dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
1070 io_u, hdr->len);
1071 fill_crc16(hdr, data, data_len);
1072 break;
1073 case VERIFY_CRC7:
1074 dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
1075 io_u, hdr->len);
1076 fill_crc7(hdr, data, data_len);
1077 break;
1078 case VERIFY_SHA256:
1079 dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
1080 io_u, hdr->len);
1081 fill_sha256(hdr, data, data_len);
1082 break;
1083 case VERIFY_SHA512:
1084 dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
1085 io_u, hdr->len);
1086 fill_sha512(hdr, data, data_len);
1087 break;
1088 case VERIFY_XXHASH:
1089 dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
1090 io_u, hdr->len);
1091 fill_xxhash(hdr, data, data_len);
1092 break;
1093 case VERIFY_SHA1:
1094 dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
1095 io_u, hdr->len);
1096 fill_sha1(hdr, data, data_len);
1097 break;
1098 case VERIFY_HDR_ONLY:
1099 case VERIFY_PATTERN:
1100 case VERIFY_PATTERN_NO_HDR:
1101 /* nothing to do here */
1102 break;
1103 default:
1104 log_err("fio: bad verify type: %d\n", td->o.verify);
1105 assert(0);
1106 }
1107
1108 if (td->o.verify_offset && hdr_size(td, hdr))
1109 memswp(p, p + td->o.verify_offset, hdr_size(td, hdr));
1110}
1111
1112/*
1113 * fill body of io_u->buf with random data and add a header with the
1114 * checksum of choice
1115 */
1116void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
1117{
1118 if (td->o.verify == VERIFY_NULL)
1119 return;
1120
1121 io_u->numberio = td->io_issues[io_u->ddir];
1122
1123 fill_pattern_headers(td, io_u, 0, 0);
1124}
1125
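/*
 * Pull the next previously written block off the IO history (kept in an
 * rb-tree or on a plain list) and turn this io_u into a read of that
 * block so its contents can be verified.
 */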
1126int get_next_verify(struct thread_data *td, struct io_u *io_u)
1127{
1128 struct io_piece *ipo = NULL;
1129
1130 /*
1131 * this io_u is from a requeue, we already filled the offsets
1132 */
1133 if (io_u->file)
1134 return 0;
1135
1136 if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
1137 struct rb_node *n = rb_first(&td->io_hist_tree);
1138
1139 ipo = rb_entry(n, struct io_piece, rb_node);
1140
1141 /*
1142 * Ensure that the associated IO has completed
1143 */
1144 read_barrier();
1145 if (ipo->flags & IP_F_IN_FLIGHT)
1146 goto nothing;
1147
1148 rb_erase(n, &td->io_hist_tree);
1149 assert(ipo->flags & IP_F_ONRB);
1150 ipo->flags &= ~IP_F_ONRB;
1151 } else if (!flist_empty(&td->io_hist_list)) {
1152 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
1153
1154 /*
1155 * Ensure that the associated IO has completed
1156 */
1157 read_barrier();
1158 if (ipo->flags & IP_F_IN_FLIGHT)
1159 goto nothing;
1160
1161 flist_del(&ipo->list);
1162 assert(ipo->flags & IP_F_ONLIST);
1163 ipo->flags &= ~IP_F_ONLIST;
1164 }
1165
1166 if (ipo) {
1167 td->io_hist_len--;
1168
1169 io_u->offset = ipo->offset;
1170 io_u->buflen = ipo->len;
1171 io_u->numberio = ipo->numberio;
1172 io_u->file = ipo->file;
1173 io_u_set(td, io_u, IO_U_F_VER_LIST);
1174
1175 if (ipo->flags & IP_F_TRIMMED)
1176 io_u_set(td, io_u, IO_U_F_TRIMMED);
1177
1178 if (!fio_file_open(io_u->file)) {
1179 int r = td_io_open_file(td, io_u->file);
1180
1181 if (r) {
1182 dprint(FD_VERIFY, "failed file %s open\n",
1183 io_u->file->file_name);
1184 return 1;
1185 }
1186 }
1187
1188 get_file(ipo->file);
1189 assert(fio_file_open(io_u->file));
1190 io_u->ddir = DDIR_READ;
1191 io_u->xfer_buf = io_u->buf;
1192 io_u->xfer_buflen = io_u->buflen;
1193
1194 remove_trim_entry(td, ipo);
1195 free(ipo);
1196 dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);
1197
1198 if (!td->o.verify_pattern_bytes) {
1199 io_u->rand_seed = __rand(&td->verify_state);
1200 if (sizeof(int) != sizeof(long *))
1201 io_u->rand_seed *= __rand(&td->verify_state);
1202 }
1203 return 0;
1204 }
1205
1206nothing:
1207 dprint(FD_VERIFY, "get_next_verify: empty\n");
1208 return 1;
1209}
1210
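/*
 * Probe once for hardware accelerated crc32c (ARM64 CRC extensions,
 * x86 SSE4.2) so the verify fast paths can use it when available.
 */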
1211void fio_verify_init(struct thread_data *td)
1212{
1213 if (td->o.verify == VERIFY_CRC32C_INTEL ||
1214 td->o.verify == VERIFY_CRC32C_ARM64 ||
1215 td->o.verify == VERIFY_CRC32C) {
1216 crc32c_arm64_probe();
1217 crc32c_intel_probe();
1218 }
1219}
1220
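/*
 * Worker for verify_async: wait on td->verify_cond for io_us queued by
 * verify_io_u_async(), splice them off td->verify_list and verify them
 * outside the main IO path.
 */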
1221static void *verify_async_thread(void *data)
1222{
1223 struct thread_data *td = data;
1224 struct io_u *io_u;
1225 int ret = 0;
1226
1227 if (fio_option_is_set(&td->o, verify_cpumask) &&
1228 fio_setaffinity(td->pid, td->o.verify_cpumask)) {
1229 log_err("fio: failed setting verify thread affinity\n");
1230 goto done;
1231 }
1232
1233 do {
1234 FLIST_HEAD(list);
1235
1236 read_barrier();
1237 if (td->verify_thread_exit)
1238 break;
1239
1240 pthread_mutex_lock(&td->io_u_lock);
1241
1242 while (flist_empty(&td->verify_list) &&
1243 !td->verify_thread_exit) {
1244 ret = pthread_cond_wait(&td->verify_cond,
1245 &td->io_u_lock);
1246 if (ret) {
1247 pthread_mutex_unlock(&td->io_u_lock);
1248 break;
1249 }
1250 }
1251
1252 flist_splice_init(&td->verify_list, &list);
1253 pthread_mutex_unlock(&td->io_u_lock);
1254
1255 if (flist_empty(&list))
1256 continue;
1257
1258 while (!flist_empty(&list)) {
1259 io_u = flist_first_entry(&list, struct io_u, verify_list);
1260 flist_del_init(&io_u->verify_list);
1261
1262 io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
1263 ret = verify_io_u(td, &io_u);
1264
1265 put_io_u(td, io_u);
1266 if (!ret)
1267 continue;
1268 if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
1269 update_error_count(td, ret);
1270 td_clear_error(td);
1271 ret = 0;
1272 }
1273 }
1274 } while (!ret);
1275
1276 if (ret) {
1277 td_verror(td, ret, "async_verify");
1278 if (td->o.verify_fatal)
1279 fio_mark_td_terminate(td);
1280 }
1281
1282done:
1283 pthread_mutex_lock(&td->io_u_lock);
1284 td->nr_verify_threads--;
1285 pthread_mutex_unlock(&td->io_u_lock);
1286
1287 pthread_cond_signal(&td->free_cond);
1288 return NULL;
1289}
1290
1291int verify_async_init(struct thread_data *td)
1292{
1293 int i, ret;
1294 pthread_attr_t attr;
1295
1296 pthread_attr_init(&attr);
1297 pthread_attr_setstacksize(&attr, 2 * PTHREAD_STACK_MIN);
1298
1299 td->verify_thread_exit = 0;
1300
1301 td->verify_threads = malloc(sizeof(pthread_t) * td->o.verify_async);
1302 for (i = 0; i < td->o.verify_async; i++) {
1303 ret = pthread_create(&td->verify_threads[i], &attr,
1304 verify_async_thread, td);
1305 if (ret) {
1306 log_err("fio: async verify creation failed: %s\n",
1307 strerror(ret));
1308 break;
1309 }
1310 ret = pthread_detach(td->verify_threads[i]);
1311 if (ret) {
1312 log_err("fio: async verify thread detach failed: %s\n",
1313 strerror(ret));
1314 break;
1315 }
1316 td->nr_verify_threads++;
1317 }
1318
1319 pthread_attr_destroy(&attr);
1320
1321 if (i != td->o.verify_async) {
1322 log_err("fio: only %d verify threads started, exiting\n", i);
1323 td->verify_thread_exit = 1;
1324 write_barrier();
1325 pthread_cond_broadcast(&td->verify_cond);
1326 return 1;
1327 }
1328
1329 return 0;
1330}
1331
1332void verify_async_exit(struct thread_data *td)
1333{
1334 td->verify_thread_exit = 1;
1335 write_barrier();
1336 pthread_cond_broadcast(&td->verify_cond);
1337
1338 pthread_mutex_lock(&td->io_u_lock);
1339
1340 while (td->nr_verify_threads)
1341 pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1342
1343 pthread_mutex_unlock(&td->io_u_lock);
1344 free(td->verify_threads);
1345 td->verify_threads = NULL;
1346}
1347
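/*
 * Paste callback that writes the io_u's block offset, little endian,
 * into a pattern buffer when a pattern format is expanded.
 */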
1348int paste_blockoff(char *buf, unsigned int len, void *priv)
1349{
1350 struct io_u *io = priv;
1351 unsigned long long off;
1352
1353 typecheck(typeof(off), io->offset);
1354 off = cpu_to_le64((uint64_t)io->offset);
1355 len = min(len, (unsigned int)sizeof(off));
1356 memcpy(buf, &off, len);
1357 return 0;
1358}
1359
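/*
 * Record the most recent write completions for this file, up to the
 * queue depth, walking f->last_write_comp backwards from last_write_idx
 * and wrapping around like the ring it is filled as.
 */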
1360static int __fill_file_completions(struct thread_data *td,
1361 struct thread_io_list *s,
1362 struct fio_file *f, unsigned int *index)
1363{
1364 unsigned int comps;
1365 int i, j;
1366
1367 if (!f->last_write_comp)
1368 return 0;
1369
1370 if (td->io_blocks[DDIR_WRITE] < td->o.iodepth)
1371 comps = td->io_blocks[DDIR_WRITE];
1372 else
1373 comps = td->o.iodepth;
1374
1375 j = f->last_write_idx - 1;
1376 for (i = 0; i < comps; i++) {
1377 if (j == -1)
1378 j = td->o.iodepth - 1;
1379 s->comps[*index].fileno = __cpu_to_le64(f->fileno);
1380 s->comps[*index].offset = cpu_to_le64(f->last_write_comp[j]);
1381 (*index)++;
1382 j--;
1383 }
1384
1385 return comps;
1386}
1387
1388static int fill_file_completions(struct thread_data *td,
1389 struct thread_io_list *s, unsigned int *index)
1390{
1391 struct fio_file *f;
1392 unsigned int i;
1393 int comps = 0;
1394
1395 for_each_file(td, f, i)
1396 comps += __fill_file_completions(td, s, f, index);
1397
1398 return comps;
1399}
1400
1401struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
1402{
1403 struct all_io_list *rep;
1404 struct thread_data *td;
1405 size_t depth;
1406 void *next;
1407 int i, nr;
1408
1409 compiletime_assert(sizeof(struct all_io_list) == 8, "all_io_list");
1410
1411 /*
1412 * Calculate reply space needed. We need one 'io_state' per thread,
1413 * and the size will vary depending on depth.
1414 */
1415 depth = 0;
1416 nr = 0;
1417 for_each_td(td, i) {
1418 if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
1419 continue;
1420 td->stop_io = 1;
1421 td->flags |= TD_F_VSTATE_SAVED;
1422 depth += (td->o.iodepth * td->o.nr_files);
1423 nr++;
1424 }
1425
1426 if (!nr)
1427 return NULL;
1428
1429 *sz = sizeof(*rep);
1430 *sz += nr * sizeof(struct thread_io_list);
1431 *sz += depth * sizeof(struct file_comp);
1432 rep = malloc(*sz);
1433 memset(rep, 0, *sz);
1434
1435 rep->threads = cpu_to_le64((uint64_t) nr);
1436
1437 next = &rep->state[0];
1438 for_each_td(td, i) {
1439 struct thread_io_list *s = next;
1440 unsigned int comps, index = 0;
1441
1442 if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
1443 continue;
1444
1445 comps = fill_file_completions(td, s, &index);
1446
1447 s->no_comps = cpu_to_le64((uint64_t) comps);
1448 s->depth = cpu_to_le64((uint64_t) td->o.iodepth);
1449 s->nofiles = cpu_to_le64((uint64_t) td->o.nr_files);
1450 s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
1451 s->index = cpu_to_le64((uint64_t) i);
1452 if (td->random_state.use64) {
1453 s->rand.state64.s[0] = cpu_to_le64(td->random_state.state64.s1);
1454 s->rand.state64.s[1] = cpu_to_le64(td->random_state.state64.s2);
1455 s->rand.state64.s[2] = cpu_to_le64(td->random_state.state64.s3);
1456 s->rand.state64.s[3] = cpu_to_le64(td->random_state.state64.s4);
1457 s->rand.state64.s[4] = cpu_to_le64(td->random_state.state64.s5);
1458 s->rand.state64.s[5] = 0;
1459 s->rand.use64 = cpu_to_le64((uint64_t)1);
1460 } else {
1461 s->rand.state32.s[0] = cpu_to_le32(td->random_state.state32.s1);
1462 s->rand.state32.s[1] = cpu_to_le32(td->random_state.state32.s2);
1463 s->rand.state32.s[2] = cpu_to_le32(td->random_state.state32.s3);
1464 s->rand.state32.s[3] = 0;
1465 s->rand.use64 = 0;
1466 }
1467 s->name[sizeof(s->name) - 1] = '\0';
1468 strncpy((char *) s->name, td->o.name, sizeof(s->name) - 1);
1469 next = io_list_next(s);
1470 }
1471
1472 return rep;
1473}
1474
1475static int open_state_file(const char *name, const char *prefix, int num,
1476 int for_write)
1477{
1478 char out[PATH_MAX];
1479 int flags;
1480 int fd;
1481
1482 if (for_write)
1483 flags = O_CREAT | O_TRUNC | O_WRONLY | O_SYNC;
1484 else
1485 flags = O_RDONLY;
1486
1487 verify_state_gen_name(out, sizeof(out), name, prefix, num);
1488
1489 fd = open(out, flags, 0644);
1490 if (fd == -1) {
1491 perror("fio: open state file");
1492 log_err("fio: state file: %s (for_write=%d)\n", out, for_write);
1493 return -1;
1494 }
1495
1496 return fd;
1497}
1498
1499static int write_thread_list_state(struct thread_io_list *s,
1500 const char *prefix)
1501{
1502 struct verify_state_hdr hdr;
1503 uint64_t crc;
1504 ssize_t ret;
1505 int fd;
1506
1507 fd = open_state_file((const char *) s->name, prefix, s->index, 1);
1508 if (fd == -1)
1509 return 1;
1510
1511 crc = fio_crc32c((void *)s, thread_io_list_sz(s));
1512
1513 hdr.version = cpu_to_le64((uint64_t) VSTATE_HDR_VERSION);
1514 hdr.size = cpu_to_le64((uint64_t) thread_io_list_sz(s));
1515 hdr.crc = cpu_to_le64(crc);
1516 ret = write(fd, &hdr, sizeof(hdr));
1517 if (ret != sizeof(hdr))
1518 goto write_fail;
1519
1520 ret = write(fd, s, thread_io_list_sz(s));
1521 if (ret != thread_io_list_sz(s)) {
1522write_fail:
1523 if (ret < 0)
1524 perror("fio: write state file");
1525 log_err("fio: failed to write state file\n");
1526 ret = 1;
1527 } else
1528 ret = 0;
1529
1530 close(fd);
1531 return ret;
1532}
1533
1534void __verify_save_state(struct all_io_list *state, const char *prefix)
1535{
1536 struct thread_io_list *s = &state->state[0];
1537 unsigned int i;
1538
1539 for (i = 0; i < le64_to_cpu(state->threads); i++) {
1540 write_thread_list_state(s, prefix);
1541 s = io_list_next(s);
1542 }
1543}
1544
1545void verify_save_state(int mask)
1546{
1547 struct all_io_list *state;
1548 size_t sz;
1549
1550 state = get_all_io_list(mask, &sz);
1551 if (state) {
1552 char prefix[PATH_MAX];
1553
1554 if (aux_path)
1555 sprintf(prefix, "%s%slocal", aux_path, FIO_OS_PATH_SEPARATOR);
1556 else
1557 strcpy(prefix, "local");
1558
1559 __verify_save_state(state, prefix);
1560 free(state);
1561 }
1562}
1563
1564void verify_free_state(struct thread_data *td)
1565{
1566 if (td->vstate)
1567 free(td->vstate);
1568}
1569
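/*
 * Convert a loaded thread_io_list from little endian to host byte order
 * and attach it to the thread as td->vstate.
 */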
1570void verify_assign_state(struct thread_data *td, void *p)
1571{
1572 struct thread_io_list *s = p;
1573 int i;
1574
1575 s->no_comps = le64_to_cpu(s->no_comps);
1576 s->depth = le32_to_cpu(s->depth);
1577 s->nofiles = le32_to_cpu(s->nofiles);
1578 s->numberio = le64_to_cpu(s->numberio);
1579 s->rand.use64 = le64_to_cpu(s->rand.use64);
1580
1581 if (s->rand.use64) {
1582 for (i = 0; i < 6; i++)
1583 s->rand.state64.s[i] = le64_to_cpu(s->rand.state64.s[i]);
1584 } else {
1585 for (i = 0; i < 4; i++)
1586 s->rand.state32.s[i] = le32_to_cpu(s->rand.state32.s[i]);
1587 }
1588
1589 for (i = 0; i < s->no_comps; i++) {
1590 s->comps[i].fileno = le64_to_cpu(s->comps[i].fileno);
1591 s->comps[i].offset = le64_to_cpu(s->comps[i].offset);
1592 }
1593
1594 td->vstate = p;
1595}
1596
1597int verify_state_hdr(struct verify_state_hdr *hdr, struct thread_io_list *s)
1598{
1599 uint64_t crc;
1600
1601 hdr->version = le64_to_cpu(hdr->version);
1602 hdr->size = le64_to_cpu(hdr->size);
1603 hdr->crc = le64_to_cpu(hdr->crc);
1604
1605 if (hdr->version != VSTATE_HDR_VERSION)
1606 return 1;
1607
1608 crc = fio_crc32c((void *)s, hdr->size);
1609 if (crc != hdr->crc)
1610 return 1;
1611
1612 return 0;
1613}
1614
1615int verify_load_state(struct thread_data *td, const char *prefix)
1616{
1617 struct verify_state_hdr hdr;
1618 void *s = NULL;
1619 uint64_t crc;
1620 ssize_t ret;
1621 int fd;
1622
1623 if (!td->o.verify_state)
1624 return 0;
1625
1626 fd = open_state_file(td->o.name, prefix, td->thread_number - 1, 0);
1627 if (fd == -1)
1628 return 1;
1629
1630 ret = read(fd, &hdr, sizeof(hdr));
1631 if (ret != sizeof(hdr)) {
1632 if (ret < 0)
1633 td_verror(td, errno, "read verify state hdr");
1634 log_err("fio: failed reading verify state header\n");
1635 goto err;
1636 }
1637
1638 hdr.version = le64_to_cpu(hdr.version);
1639 hdr.size = le64_to_cpu(hdr.size);
1640 hdr.crc = le64_to_cpu(hdr.crc);
1641
1642 if (hdr.version != VSTATE_HDR_VERSION) {
1643 log_err("fio: unsupported (%d) version in verify state header\n",
1644 (unsigned int) hdr.version);
1645 goto err;
1646 }
1647
1648 s = malloc(hdr.size);
1649 ret = read(fd, s, hdr.size);
1650 if (ret != hdr.size) {
1651 if (ret < 0)
1652 td_verror(td, errno, "read verify state");
1653 log_err("fio: failed reading verity state\n");
1654 goto err;
1655 }
1656
1657 crc = fio_crc32c(s, hdr.size);
1658 if (crc != hdr.crc) {
1659 log_err("fio: verify state is corrupt\n");
1660 goto err;
1661 }
1662
1663 close(fd);
1664
1665 verify_assign_state(td, s);
1666 return 0;
1667err:
1668 if (s)
1669 free(s);
1670 close(fd);
1671 return 1;
1672}
1673
1674/*
1675 * Use the loaded verify state to know when to stop doing verification
1676 */
1677int verify_state_should_stop(struct thread_data *td, struct io_u *io_u)
1678{
1679 struct thread_io_list *s = td->vstate;
1680 struct fio_file *f = io_u->file;
1681 int i;
1682
1683 if (!s || !f)
1684 return 0;
1685
1686 /*
1687 * If we're not yet into the window of the last 'depth' issues, continue.
1688 * If fewer IOs than 'depth' were issued in total, always do the check.
1689 */
1690 if ((td->io_blocks[DDIR_READ] < s->depth ||
1691 s->numberio - td->io_blocks[DDIR_READ] > s->depth) &&
1692 s->numberio > s->depth)
1693 return 0;
1694
1695 /*
1696 * We're in the window of having to check if this io was
1697 * completed or not. If the IO was seen as completed, then
1698 * let's verify it.
1699 */
1700 for (i = 0; i < s->no_comps; i++) {
1701 if (s->comps[i].fileno != f->fileno)
1702 continue;
1703 if (io_u->offset == s->comps[i].offset)
1704 return 0;
1705 }
1706
1707 /*
1708 * Not found, we have to stop
1709 */
1710 return 1;
1711}