1 /*
2  * IO verification helpers
3  */
4 #include <unistd.h>
5 #include <fcntl.h>
6 #include <string.h>
7 #include <assert.h>
8 #include <pthread.h>
9 #include <libgen.h>
10
11 #include "arch/arch.h"
12 #include "fio.h"
13 #include "verify.h"
14 #include "trim.h"
15 #include "lib/rand.h"
16 #include "lib/hweight.h"
17 #include "lib/pattern.h"
18 #include "oslib/asprintf.h"
19
20 #include "crc/md5.h"
21 #include "crc/crc64.h"
22 #include "crc/crc32.h"
23 #include "crc/crc32c.h"
24 #include "crc/crc16.h"
25 #include "crc/crc7.h"
26 #include "crc/sha256.h"
27 #include "crc/sha512.h"
28 #include "crc/sha1.h"
29 #include "crc/xxhash.h"
30 #include "crc/sha3.h"
31
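/*
 * Layout note: every verified region (one per verify_interval, or the whole
 * buffer when no interval applies) begins with a struct verify_header,
 * optionally followed by a checksum-specific header (struct vhdr_*) obtained
 * via hdr_priv(); the rest of the region holds the pattern or seeded random
 * payload that the checksum covers. With verify_offset, the header is swapped
 * to a different position in the region on write and swapped back before
 * verification.
 */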
32 static void populate_hdr(struct thread_data *td, struct io_u *io_u,
33                          struct verify_header *hdr, unsigned int header_num,
34                          unsigned int header_len);
35 static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
36                        struct verify_header *hdr, unsigned int header_num,
37                        unsigned int header_len, uint64_t rand_seed);
38
39 void fill_buffer_pattern(struct thread_data *td, void *p, unsigned int len)
40 {
41         (void)cpy_pattern(td->o.buffer_pattern, td->o.buffer_pattern_bytes, p, len);
42 }
43
44 static void __fill_buffer(struct thread_options *o, uint64_t seed, void *p,
45                           unsigned int len)
46 {
47         __fill_random_buf_percentage(seed, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
48 }
49
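/*
 * Fill the verify payload: seeded (optionally compressible) random data when
 * no verify_pattern is configured, otherwise the configured pattern. When
 * verify_pattern_interval is set, format specifiers are re-expanded at every
 * interval boundary, with io_u->offset temporarily advanced so each segment
 * encodes the offset it belongs to; the original offset is restored before
 * returning.
 */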
50 void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
51                          struct io_u *io_u, uint64_t seed, int use_seed)
52 {
53         struct thread_options *o = &td->o;
54         unsigned int interval = o->verify_pattern_interval;
55         unsigned long long offset = io_u->offset;
56
57         if (!o->verify_pattern_bytes) {
58                 dprint(FD_VERIFY, "fill random bytes len=%u\n", len);
59
60                 if (!use_seed) {
61                         seed = __rand(&td->verify_state);
62                         if (sizeof(int) != sizeof(long *))
63                                 seed *= (unsigned long)__rand(&td->verify_state);
64                 }
65                 io_u->rand_seed = seed;
66                 __fill_buffer(o, seed, p, len);
67                 return;
68         }
69
70         /* Skip refilling if the buffer is already filled and the pattern does
71          * not need patching with a format. However, we cannot skip if
72          * verify_offset is set, because the header was swapped with pattern bytes. */
73         if (!td->o.verify_fmt_sz && io_u->buf_filled_len >= len && !td->o.verify_offset) {
74                 dprint(FD_VERIFY, "using already filled verify pattern b=%d len=%u\n",
75                         o->verify_pattern_bytes, len);
76                 return;
77         }
78
79         if (!interval)
80                 interval = len;
81
82         io_u->offset += (p - io_u->buf) - (p - io_u->buf) % interval;
83         for (unsigned int bytes_done = 0, bytes_todo = 0; bytes_done < len;
84                         bytes_done += bytes_todo, p += bytes_todo, io_u->offset += interval) {
85                 bytes_todo = (p - io_u->buf) % interval;
86                 if (!bytes_todo)
87                         bytes_todo = interval;
88                 bytes_todo = min(bytes_todo, len - bytes_done);
89
90                 (void)paste_format(td->o.verify_pattern, td->o.verify_pattern_bytes,
91                                    td->o.verify_fmt, td->o.verify_fmt_sz,
92                                    p, bytes_todo, io_u);
93         }
94
95         io_u->buf_filled_len = len;
96         io_u->offset = offset;
97 }
98
99 static unsigned int get_hdr_inc(struct thread_data *td, struct io_u *io_u)
100 {
101         unsigned int hdr_inc;
102
103         /*
104          * If we use bs_unaligned, buflen can be larger than the verify
105          * interval (which just defaults to the smallest blocksize possible).
106          */
107         hdr_inc = io_u->buflen;
108         if (td->o.verify_interval && td->o.verify_interval <= io_u->buflen &&
109             !td->o.bs_unaligned)
110                 hdr_inc = td->o.verify_interval;
111
112         return hdr_inc;
113 }
114
115 static void fill_pattern_headers(struct thread_data *td, struct io_u *io_u,
116                                  uint64_t seed, int use_seed)
117 {
118         unsigned int hdr_inc, header_num;
119         struct verify_header *hdr;
120         void *p = io_u->buf;
121
122         fill_verify_pattern(td, p, io_u->buflen, io_u, seed, use_seed);
123
124         hdr_inc = get_hdr_inc(td, io_u);
125         header_num = 0;
126         for (; p < io_u->buf + io_u->buflen; p += hdr_inc) {
127                 hdr = p;
128                 populate_hdr(td, io_u, hdr, header_num, hdr_inc);
129                 header_num++;
130         }
131 }
132
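/*
 * Swap two equal-sized regions within a buffer; used to move the verify
 * header to/from its alternate location when verify_offset is set. The
 * 200-byte scratch buffer bounds the swap and covers the largest header
 * used here.
 */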
133 static void memswp(void *buf1, void *buf2, unsigned int len)
134 {
135         char swap[200];
136
137         assert(len <= sizeof(swap));
138
139         memcpy(&swap, buf1, len);
140         memcpy(buf1, buf2, len);
141         memcpy(buf2, &swap, len);
142 }
143
144 static void hexdump(void *buffer, int len)
145 {
146         unsigned char *p = buffer;
147         int i;
148
149         for (i = 0; i < len; i++)
150                 log_err("%02x", p[i]);
151         log_err("\n");
152 }
153
154 /*
155  * Prepare for separation of verify_header and checksum header
156  */
157 static inline unsigned int __hdr_size(int verify_type)
158 {
159         unsigned int len = 0;
160
161         switch (verify_type) {
162         case VERIFY_NONE:
163         case VERIFY_HDR_ONLY:
164         case VERIFY_NULL:
165         case VERIFY_PATTERN:
166                 len = 0;
167                 break;
168         case VERIFY_MD5:
169                 len = sizeof(struct vhdr_md5);
170                 break;
171         case VERIFY_CRC64:
172                 len = sizeof(struct vhdr_crc64);
173                 break;
174         case VERIFY_CRC32C:
175         case VERIFY_CRC32:
176         case VERIFY_CRC32C_INTEL:
177                 len = sizeof(struct vhdr_crc32);
178                 break;
179         case VERIFY_CRC16:
180                 len = sizeof(struct vhdr_crc16);
181                 break;
182         case VERIFY_CRC7:
183                 len = sizeof(struct vhdr_crc7);
184                 break;
185         case VERIFY_SHA256:
186                 len = sizeof(struct vhdr_sha256);
187                 break;
188         case VERIFY_SHA512:
189                 len = sizeof(struct vhdr_sha512);
190                 break;
191         case VERIFY_SHA3_224:
192                 len = sizeof(struct vhdr_sha3_224);
193                 break;
194         case VERIFY_SHA3_256:
195                 len = sizeof(struct vhdr_sha3_256);
196                 break;
197         case VERIFY_SHA3_384:
198                 len = sizeof(struct vhdr_sha3_384);
199                 break;
200         case VERIFY_SHA3_512:
201                 len = sizeof(struct vhdr_sha3_512);
202                 break;
203         case VERIFY_XXHASH:
204                 len = sizeof(struct vhdr_xxhash);
205                 break;
206         case VERIFY_SHA1:
207                 len = sizeof(struct vhdr_sha1);
208                 break;
209         case VERIFY_PATTERN_NO_HDR:
210                 return 0;
211         default:
212                 log_err("fio: unknown verify header!\n");
213                 assert(0);
214         }
215
216         return len + sizeof(struct verify_header);
217 }
218
219 static inline unsigned int hdr_size(struct thread_data *td,
220                                     struct verify_header *hdr)
221 {
222         if (td->o.verify == VERIFY_PATTERN_NO_HDR)
223                 return 0;
224
225         return __hdr_size(hdr->verify_type);
226 }
227
228 static void *hdr_priv(struct verify_header *hdr)
229 {
230         void *priv = hdr;
231
232         return priv + sizeof(struct verify_header);
233 }
234
235 /*
236  * Verify container, pass info to verify handlers and allow them to
237  * pass info back in case of error
238  */
239 struct vcont {
240         /*
241          * Input
242          */
243         struct io_u *io_u;
244         unsigned int hdr_num;
245         struct thread_data *td;
246
247         /*
248          * Output, only valid in case of error
249          */
250         const char *name;
251         void *good_crc;
252         void *bad_crc;
253         unsigned int crc_len;
254 };
255
256 #define DUMP_BUF_SZ     255
257
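/*
 * Dump a copy of a buffer to <aux_path/><file basename>.<offset>.<type>
 * (type is "received", "expected" or "hdr_fail") so failing verify data can
 * be inspected offline. Only used when verify_dump is enabled.
 */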
258 static void dump_buf(char *buf, unsigned int len, unsigned long long offset,
259                      const char *type, struct fio_file *f)
260 {
261         char *ptr, *fname;
262         char sep[2] = { FIO_OS_PATH_SEPARATOR, 0 };
263         int ret, fd;
264
265         ptr = strdup(f->file_name);
266
267         if (asprintf(&fname, "%s%s%s.%llu.%s", aux_path ? : "",
268                      aux_path ? sep : "", basename(ptr), offset, type) < 0) {
269                 if (!fio_did_warn(FIO_WARN_VERIFY_BUF))
270                         log_err("fio: not enough memory for dump buffer filename\n");
271                 goto free_ptr;
272         }
273
274         fd = open(fname, O_CREAT | O_TRUNC | O_WRONLY, 0644);
275         if (fd < 0) {
276                 perror("open verify buf file");
277                 goto free_fname;
278         }
279
280         while (len) {
281                 ret = write(fd, buf, len);
282                 if (!ret)
283                         break;
284                 else if (ret < 0) {
285                         perror("write verify buf file");
286                         break;
287                 }
288                 len -= ret;
289                 buf += ret;
290         }
291
292         close(fd);
293         log_err("       %s data dumped as %s\n", type, fname);
294
295 free_fname:
296         free(fname);
297
298 free_ptr:
299         free(ptr);
300 }
301
302 /*
303  * Dump the contents of the read block and re-generate the correct data
304  * and dump that too.
305  */
306 static void __dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
307 {
308         struct thread_data *td = vc->td;
309         struct io_u *io_u = vc->io_u;
310         unsigned long hdr_offset;
311         struct io_u dummy;
312         void *buf;
313
314         if (!td->o.verify_dump)
315                 return;
316
317         /*
318          * Dump the contents we just read off disk
319          */
320         hdr_offset = vc->hdr_num * hdr->len;
321
322         dump_buf(io_u->buf + hdr_offset, hdr->len, io_u->verify_offset + hdr_offset,
323                         "received", vc->io_u->file);
324
325         /*
326          * Allocate a new buf and re-generate the original data
327          */
328         buf = malloc(io_u->buflen);
329         dummy = *io_u;
330         dummy.buf = buf;
331         dummy.rand_seed = hdr->rand_seed;
332         dummy.buf_filled_len = 0;
333         dummy.buflen = io_u->buflen;
334
335         fill_pattern_headers(td, &dummy, hdr->rand_seed, 1);
336
337         dump_buf(buf + hdr_offset, hdr->len, io_u->verify_offset + hdr_offset,
338                         "expected", vc->io_u->file);
339         free(buf);
340 }
341
342 static void dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
343 {
344         struct thread_data *td = vc->td;
345         struct verify_header shdr;
346
347         if (td->o.verify == VERIFY_PATTERN_NO_HDR) {
348                 __fill_hdr(td, vc->io_u, &shdr, 0, vc->io_u->buflen, 0);
349                 hdr = &shdr;
350         }
351
352         __dump_verify_buffers(hdr, vc);
353 }
354
355 static void log_verify_failure(struct verify_header *hdr, struct vcont *vc)
356 {
357         unsigned long long offset;
358         uint32_t len;
359         struct thread_data *td = vc->td;
360
361         offset = vc->io_u->verify_offset;
362         if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
363                 len = hdr->len;
364                 offset += (unsigned long long) vc->hdr_num * len;
365         } else {
366                 len = vc->io_u->buflen;
367         }
368
369         log_err("%.8s: verify failed at file %s offset %llu, length %u"
370                         " (requested block: offset=%llu, length=%llu, flags=%x)\n",
371                         vc->name, vc->io_u->file->file_name, offset, len,
372                         vc->io_u->verify_offset, vc->io_u->buflen, vc->io_u->flags);
373
374         if (vc->good_crc && vc->bad_crc) {
375                 log_err("       Expected CRC: ");
376                 hexdump(vc->good_crc, vc->crc_len);
377                 log_err("       Received CRC: ");
378                 hexdump(vc->bad_crc, vc->crc_len);
379         }
380
381         dump_verify_buffers(hdr, vc);
382 }
383
384 /*
385  * Return data area 'header_num'
386  */
387 static inline void *io_u_verify_off(struct verify_header *hdr, struct vcont *vc)
388 {
389         return vc->io_u->buf + vc->hdr_num * hdr->len + hdr_size(vc->td, hdr);
390 }
391
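/*
 * Compare a data area against the repeating pattern, starting at pattern
 * byte 'mod'. The fast path relies on cmp_pattern(); only if that reports a
 * mismatch is the buffer rescanned byte by byte so the error message can
 * name the exact block offset and the number of flipped bits.
 */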
392 static int check_pattern(char *buf, unsigned int len, unsigned int mod,
393                 unsigned int pattern_size, char *pattern, unsigned int header_size)
394 {
395         unsigned int i;
396         int rc;
397
398         rc = cmp_pattern(pattern, pattern_size, mod, buf, len);
399         if (!rc)
400                 goto done;
401
402         /* Slow path, compare each byte */
403         for (i = 0; i < len; i++) {
404                 if (buf[i] != pattern[mod]) {
405                         unsigned int bits;
406
407                         bits = hweight8(buf[i] ^ pattern[mod]);
408                         log_err("fio: got pattern '%02x', wanted '%02x'. Bad bits %d\n",
409                                 (unsigned char)buf[i],
410                                 (unsigned char)pattern[mod],
411                                 bits);
412                         log_err("fio: bad pattern block offset %u\n",
413                                 i + header_size);
414                         rc = EILSEQ;
415                         goto done;
416                 }
417                 mod++;
418                 if (mod == pattern_size)
419                         mod = 0;
420         }
421
422 done:
423         return rc;
424 }
425
426 /*
427  *  The current thread will need its own buffer if there are multiple threads
428  *  and the pattern contains the offset. Fio currently only has one pattern
429  *  format specifier so we only need to check that one, but this may need to be
430  *  changed if fio ever gains more pattern format specifiers.
431  */
432 static inline bool pattern_need_buffer(struct thread_data *td)
433 {
434         return td->o.verify_async &&
435                 td->o.verify_fmt_sz &&
436                 td->o.verify_fmt[0].desc->paste == paste_blockoff;
437 }
438
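/*
 * Illustration of the interval arithmetic in verify_io_u_pattern() below
 * (numbers are hypothetical): with verify_pattern_interval=512, a 4-byte
 * pattern, hdr_inc=4096 and, say, a 40-byte header, header number 1 has its
 * payload starting at extent = 4096 * 1 + 40 = 4136. That is 4136 % 512 = 40
 * bytes into a pattern interval, i.e. pattern byte 40 % 4 = 0, and the first
 * chunk compared is min(len, 512 - 40) bytes long.
 */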
439 static int verify_io_u_pattern(struct verify_header *hdr, struct vcont *vc)
440 {
441         struct thread_data *td = vc->td;
442         struct io_u *io_u = vc->io_u;
443         char *buf, *pattern;
444         unsigned int header_size = __hdr_size(td->o.verify);
445         unsigned int len, mod, pattern_size, pattern_interval_mod, bytes_done = 0, bytes_todo;
446         int rc;
447         unsigned long long offset = io_u->offset;
448
449         pattern = td->o.verify_pattern;
450         pattern_size = td->o.verify_pattern_bytes;
451         assert(pattern_size != 0);
452
453         /*
454          * Make this thread safe when verify_async is set and the verify
455          * pattern includes the offset.
456          */
457         if (pattern_need_buffer(td)) {
458                 pattern = malloc(pattern_size);
459                 assert(pattern);
460                 memcpy(pattern, td->o.verify_pattern, pattern_size);
461         }
462
463         if (!td->o.verify_pattern_interval) {
464                 (void)paste_format_inplace(pattern, pattern_size,
465                                            td->o.verify_fmt, td->o.verify_fmt_sz, io_u);
466         }
467
468         /*
469          * We have 3 cases here:
470          * 1. Compare the entire buffer if (1) verify_interval is not set and
471          * (2) verify_pattern_interval is not set
472          * 2. Compare the entire *verify_interval* if (1) verify_interval *is*
473          * set and (2) verify_pattern_interval is not set
474          * 3. Compare *verify_pattern_interval* segments or subsets thereof if
475          * (2) verify_pattern_interval is set
476          */
477
478         buf = (char *) hdr + header_size;
479         len = get_hdr_inc(td, io_u) - header_size;
480         if (td->o.verify_pattern_interval) {
481                 unsigned int extent = get_hdr_inc(td, io_u) * vc->hdr_num + header_size;
482                 pattern_interval_mod = extent % td->o.verify_pattern_interval;
483                 mod = pattern_interval_mod % pattern_size;
484                 bytes_todo = min(len, td->o.verify_pattern_interval - pattern_interval_mod);
485                 io_u->offset += extent / td->o.verify_pattern_interval * td->o.verify_pattern_interval;
486         } else {
487                 mod = (get_hdr_inc(td, io_u) * vc->hdr_num + header_size) % pattern_size;
488                 bytes_todo = len;
489                 pattern_interval_mod = 0;
490         }
491
492         while (bytes_done < len) {
493                 if (td->o.verify_pattern_interval) {
494                         (void)paste_format_inplace(pattern, pattern_size,
495                                         td->o.verify_fmt, td->o.verify_fmt_sz,
496                                         io_u);
497                 }
498
499                 rc = check_pattern(buf, bytes_todo, mod, pattern_size, pattern, header_size);
500                 if (rc) {
501                         vc->name = "pattern";
502                         log_verify_failure(hdr, vc);
503                         break;
504                 }
505
506                 mod = 0;
507                 bytes_done += bytes_todo;
508                 buf += bytes_todo;
509                 io_u->offset += td->o.verify_pattern_interval;
510                 bytes_todo = min(len - bytes_done, td->o.verify_pattern_interval);
511         }
512
513         io_u->offset = offset;
514         if (pattern_need_buffer(td))
515                 free(pattern);
516         return rc;
517 }
518
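/*
 * The verify_io_u_<csum>() helpers below share one shape: recompute the
 * checksum over the data area (hdr->len minus the header size), compare it
 * with the value stored in the per-type header, and on mismatch record the
 * expected/received values in the vcont before returning EILSEQ.
 */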
519 static int verify_io_u_xxhash(struct verify_header *hdr, struct vcont *vc)
520 {
521         void *p = io_u_verify_off(hdr, vc);
522         struct vhdr_xxhash *vh = hdr_priv(hdr);
523         uint32_t hash;
524         void *state;
525
526         dprint(FD_VERIFY, "xxhash verify io_u %p, len %u\n", vc->io_u, hdr->len);
527
528         state = XXH32_init(1);
529         XXH32_update(state, p, hdr->len - hdr_size(vc->td, hdr));
530         hash = XXH32_digest(state);
531
532         if (vh->hash == hash)
533                 return 0;
534
535         vc->name = "xxhash";
536         vc->good_crc = &vh->hash;
537         vc->bad_crc = &hash;
538         vc->crc_len = sizeof(hash);
539         log_verify_failure(hdr, vc);
540         return EILSEQ;
541 }
542
543 static int verify_io_u_sha3(struct verify_header *hdr, struct vcont *vc,
544                             struct fio_sha3_ctx *sha3_ctx, uint8_t *sha,
545                             unsigned int sha_size, const char *name)
546 {
547         void *p = io_u_verify_off(hdr, vc);
548
549         dprint(FD_VERIFY, "%s verify io_u %p, len %u\n", name, vc->io_u, hdr->len);
550
551         fio_sha3_update(sha3_ctx, p, hdr->len - hdr_size(vc->td, hdr));
552         fio_sha3_final(sha3_ctx);
553
554         if (!memcmp(sha, sha3_ctx->sha, sha_size))
555                 return 0;
556
557         vc->name = name;
558         vc->good_crc = sha;
559         vc->bad_crc = sha3_ctx->sha;
560         vc->crc_len = sha_size;
561         log_verify_failure(hdr, vc);
562         return EILSEQ;
563 }
564
565 static int verify_io_u_sha3_224(struct verify_header *hdr, struct vcont *vc)
566 {
567         struct vhdr_sha3_224 *vh = hdr_priv(hdr);
568         uint8_t sha[SHA3_224_DIGEST_SIZE];
569         struct fio_sha3_ctx sha3_ctx = {
570                 .sha = sha,
571         };
572
573         fio_sha3_224_init(&sha3_ctx);
574
575         return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
576                                 SHA3_224_DIGEST_SIZE, "sha3-224");
577 }
578
579 static int verify_io_u_sha3_256(struct verify_header *hdr, struct vcont *vc)
580 {
581         struct vhdr_sha3_256 *vh = hdr_priv(hdr);
582         uint8_t sha[SHA3_256_DIGEST_SIZE];
583         struct fio_sha3_ctx sha3_ctx = {
584                 .sha = sha,
585         };
586
587         fio_sha3_256_init(&sha3_ctx);
588
589         return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
590                                 SHA3_256_DIGEST_SIZE, "sha3-256");
591 }
592
593 static int verify_io_u_sha3_384(struct verify_header *hdr, struct vcont *vc)
594 {
595         struct vhdr_sha3_384 *vh = hdr_priv(hdr);
596         uint8_t sha[SHA3_384_DIGEST_SIZE];
597         struct fio_sha3_ctx sha3_ctx = {
598                 .sha = sha,
599         };
600
601         fio_sha3_384_init(&sha3_ctx);
602
603         return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
604                                 SHA3_384_DIGEST_SIZE, "sha3-384");
605 }
606
607 static int verify_io_u_sha3_512(struct verify_header *hdr, struct vcont *vc)
608 {
609         struct vhdr_sha3_512 *vh = hdr_priv(hdr);
610         uint8_t sha[SHA3_512_DIGEST_SIZE];
611         struct fio_sha3_ctx sha3_ctx = {
612                 .sha = sha,
613         };
614
615         fio_sha3_512_init(&sha3_ctx);
616
617         return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
618                                 SHA3_512_DIGEST_SIZE, "sha3-512");
619 }
620
621 static int verify_io_u_sha512(struct verify_header *hdr, struct vcont *vc)
622 {
623         void *p = io_u_verify_off(hdr, vc);
624         struct vhdr_sha512 *vh = hdr_priv(hdr);
625         uint8_t sha512[128];
626         struct fio_sha512_ctx sha512_ctx = {
627                 .buf = sha512,
628         };
629
630         dprint(FD_VERIFY, "sha512 verify io_u %p, len %u\n", vc->io_u, hdr->len);
631
632         fio_sha512_init(&sha512_ctx);
633         fio_sha512_update(&sha512_ctx, p, hdr->len - hdr_size(vc->td, hdr));
634         fio_sha512_final(&sha512_ctx);
635
636         if (!memcmp(vh->sha512, sha512_ctx.buf, sizeof(sha512)))
637                 return 0;
638
639         vc->name = "sha512";
640         vc->good_crc = vh->sha512;
641         vc->bad_crc = sha512_ctx.buf;
642         vc->crc_len = sizeof(vh->sha512);
643         log_verify_failure(hdr, vc);
644         return EILSEQ;
645 }
646
647 static int verify_io_u_sha256(struct verify_header *hdr, struct vcont *vc)
648 {
649         void *p = io_u_verify_off(hdr, vc);
650         struct vhdr_sha256 *vh = hdr_priv(hdr);
651         uint8_t sha256[64];
652         struct fio_sha256_ctx sha256_ctx = {
653                 .buf = sha256,
654         };
655
656         dprint(FD_VERIFY, "sha256 verify io_u %p, len %u\n", vc->io_u, hdr->len);
657
658         fio_sha256_init(&sha256_ctx);
659         fio_sha256_update(&sha256_ctx, p, hdr->len - hdr_size(vc->td, hdr));
660         fio_sha256_final(&sha256_ctx);
661
662         if (!memcmp(vh->sha256, sha256_ctx.buf, sizeof(sha256)))
663                 return 0;
664
665         vc->name = "sha256";
666         vc->good_crc = vh->sha256;
667         vc->bad_crc = sha256_ctx.buf;
668         vc->crc_len = sizeof(vh->sha256);
669         log_verify_failure(hdr, vc);
670         return EILSEQ;
671 }
672
673 static int verify_io_u_sha1(struct verify_header *hdr, struct vcont *vc)
674 {
675         void *p = io_u_verify_off(hdr, vc);
676         struct vhdr_sha1 *vh = hdr_priv(hdr);
677         uint32_t sha1[5];
678         struct fio_sha1_ctx sha1_ctx = {
679                 .H = sha1,
680         };
681
682         dprint(FD_VERIFY, "sha1 verify io_u %p, len %u\n", vc->io_u, hdr->len);
683
684         fio_sha1_init(&sha1_ctx);
685         fio_sha1_update(&sha1_ctx, p, hdr->len - hdr_size(vc->td, hdr));
686         fio_sha1_final(&sha1_ctx);
687
688         if (!memcmp(vh->sha1, sha1_ctx.H, sizeof(sha1)))
689                 return 0;
690
691         vc->name = "sha1";
692         vc->good_crc = vh->sha1;
693         vc->bad_crc = sha1_ctx.H;
694         vc->crc_len = sizeof(vh->sha1);
695         log_verify_failure(hdr, vc);
696         return EILSEQ;
697 }
698
699 static int verify_io_u_crc7(struct verify_header *hdr, struct vcont *vc)
700 {
701         void *p = io_u_verify_off(hdr, vc);
702         struct vhdr_crc7 *vh = hdr_priv(hdr);
703         unsigned char c;
704
705         dprint(FD_VERIFY, "crc7 verify io_u %p, len %u\n", vc->io_u, hdr->len);
706
707         c = fio_crc7(p, hdr->len - hdr_size(vc->td, hdr));
708
709         if (c == vh->crc7)
710                 return 0;
711
712         vc->name = "crc7";
713         vc->good_crc = &vh->crc7;
714         vc->bad_crc = &c;
715         vc->crc_len = 1;
716         log_verify_failure(hdr, vc);
717         return EILSEQ;
718 }
719
720 static int verify_io_u_crc16(struct verify_header *hdr, struct vcont *vc)
721 {
722         void *p = io_u_verify_off(hdr, vc);
723         struct vhdr_crc16 *vh = hdr_priv(hdr);
724         unsigned short c;
725
726         dprint(FD_VERIFY, "crc16 verify io_u %p, len %u\n", vc->io_u, hdr->len);
727
728         c = fio_crc16(p, hdr->len - hdr_size(vc->td, hdr));
729
730         if (c == vh->crc16)
731                 return 0;
732
733         vc->name = "crc16";
734         vc->good_crc = &vh->crc16;
735         vc->bad_crc = &c;
736         vc->crc_len = 2;
737         log_verify_failure(hdr, vc);
738         return EILSEQ;
739 }
740
741 static int verify_io_u_crc64(struct verify_header *hdr, struct vcont *vc)
742 {
743         void *p = io_u_verify_off(hdr, vc);
744         struct vhdr_crc64 *vh = hdr_priv(hdr);
745         unsigned long long c;
746
747         dprint(FD_VERIFY, "crc64 verify io_u %p, len %u\n", vc->io_u, hdr->len);
748
749         c = fio_crc64(p, hdr->len - hdr_size(vc->td, hdr));
750
751         if (c == vh->crc64)
752                 return 0;
753
754         vc->name = "crc64";
755         vc->good_crc = &vh->crc64;
756         vc->bad_crc = &c;
757         vc->crc_len = 8;
758         log_verify_failure(hdr, vc);
759         return EILSEQ;
760 }
761
762 static int verify_io_u_crc32(struct verify_header *hdr, struct vcont *vc)
763 {
764         void *p = io_u_verify_off(hdr, vc);
765         struct vhdr_crc32 *vh = hdr_priv(hdr);
766         uint32_t c;
767
768         dprint(FD_VERIFY, "crc32 verify io_u %p, len %u\n", vc->io_u, hdr->len);
769
770         c = fio_crc32(p, hdr->len - hdr_size(vc->td, hdr));
771
772         if (c == vh->crc32)
773                 return 0;
774
775         vc->name = "crc32";
776         vc->good_crc = &vh->crc32;
777         vc->bad_crc = &c;
778         vc->crc_len = 4;
779         log_verify_failure(hdr, vc);
780         return EILSEQ;
781 }
782
783 static int verify_io_u_crc32c(struct verify_header *hdr, struct vcont *vc)
784 {
785         void *p = io_u_verify_off(hdr, vc);
786         struct vhdr_crc32 *vh = hdr_priv(hdr);
787         uint32_t c;
788
789         dprint(FD_VERIFY, "crc32c verify io_u %p, len %u\n", vc->io_u, hdr->len);
790
791         c = fio_crc32c(p, hdr->len - hdr_size(vc->td, hdr));
792
793         if (c == vh->crc32)
794                 return 0;
795
796         vc->name = "crc32c";
797         vc->good_crc = &vh->crc32;
798         vc->bad_crc = &c;
799         vc->crc_len = 4;
800         log_verify_failure(hdr, vc);
801         return EILSEQ;
802 }
803
804 static int verify_io_u_md5(struct verify_header *hdr, struct vcont *vc)
805 {
806         void *p = io_u_verify_off(hdr, vc);
807         struct vhdr_md5 *vh = hdr_priv(hdr);
808         uint32_t hash[MD5_HASH_WORDS];
809         struct fio_md5_ctx md5_ctx = {
810                 .hash = hash,
811         };
812
813         dprint(FD_VERIFY, "md5 verify io_u %p, len %u\n", vc->io_u, hdr->len);
814
815         fio_md5_init(&md5_ctx);
816         fio_md5_update(&md5_ctx, p, hdr->len - hdr_size(vc->td, hdr));
817         fio_md5_final(&md5_ctx);
818
819         if (!memcmp(vh->md5_digest, md5_ctx.hash, sizeof(hash)))
820                 return 0;
821
822         vc->name = "md5";
823         vc->good_crc = vh->md5_digest;
824         vc->bad_crc = md5_ctx.hash;
825         vc->crc_len = sizeof(hash);
826         log_verify_failure(hdr, vc);
827         return EILSEQ;
828 }
829
830 /*
831  * Push IO verification to a separate thread
832  */
833 int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
834 {
835         struct io_u *io_u = *io_u_ptr;
836
837         pthread_mutex_lock(&td->io_u_lock);
838
839         if (io_u->file)
840                 put_file_log(td, io_u->file);
841
842         if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
843                 td->cur_depth--;
844                 io_u_clear(td, io_u, IO_U_F_IN_CUR_DEPTH);
845         }
846         flist_add_tail(&io_u->verify_list, &td->verify_list);
847         *io_u_ptr = NULL;
848
849         pthread_cond_signal(&td->verify_cond);
850         pthread_mutex_unlock(&td->io_u_lock);
851         return 0;
852 }
853
854 /*
855  * Thanks Rusty, for spending the time so I don't have to.
856  *
857  * http://rusty.ozlabs.org/?p=560
858  */
859 static int mem_is_zero(const void *data, size_t length)
860 {
861         const unsigned char *p = data;
862         size_t len;
863
864         /* Check first 16 bytes manually */
865         for (len = 0; len < 16; len++) {
866                 if (!length)
867                         return 1;
868                 if (*p)
869                         return 0;
870                 p++;
871                 length--;
872         }
873
874         /* The first 16 bytes are known to be zero; memcmp the buffer with itself offset by 16. */
875         return memcmp(data, p, length) == 0;
876 }
877
878 static int mem_is_zero_slow(const void *data, size_t length, size_t *offset)
879 {
880         const unsigned char *p = data;
881
882         *offset = 0;
883         while (length) {
884                 if (*p)
885                         break;
886                 (*offset)++;
887                 length--;
888                 p++;
889         }
890
891         return !length;
892 }
893
894 static int verify_trimmed_io_u(struct thread_data *td, struct io_u *io_u)
895 {
896         size_t offset;
897
898         if (!td->o.trim_zero)
899                 return 0;
900
901         if (mem_is_zero(io_u->buf, io_u->buflen))
902                 return 0;
903
904         mem_is_zero_slow(io_u->buf, io_u->buflen, &offset);
905
906         log_err("trim: verify failed at file %s offset %llu, length %llu"
907                 ", block offset %lu\n",
908                         io_u->file->file_name, io_u->verify_offset, io_u->buflen,
909                         (unsigned long) offset);
910         return EILSEQ;
911 }
912
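/*
 * Validate the verify_header itself: magic, stored length, the random seed
 * (only when verify_header_seed is set), the expected offset, the write
 * sequence number (only for write workloads with a fixed block size, not
 * time_based, and with verify_write_sequence enabled), and finally the crc32c
 * computed over the header fields preceding the crc32 member.
 */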
913 static int verify_header(struct io_u *io_u, struct thread_data *td,
914                          struct verify_header *hdr, unsigned int hdr_num,
915                          unsigned int hdr_len)
916 {
917         void *p = hdr;
918         uint32_t crc;
919
920         if (hdr->magic != FIO_HDR_MAGIC) {
921                 log_err("verify: bad magic header %x, wanted %x",
922                         hdr->magic, FIO_HDR_MAGIC);
923                 goto err;
924         }
925         if (hdr->len != hdr_len) {
926                 log_err("verify: bad header length %u, wanted %u",
927                         hdr->len, hdr_len);
928                 goto err;
929         }
930         if (td->o.verify_header_seed && (hdr->rand_seed != io_u->rand_seed)) {
931                 log_err("verify: bad header rand_seed %"PRIu64
932                         ", wanted %"PRIu64,
933                         hdr->rand_seed, io_u->rand_seed);
934                 goto err;
935         }
936         if (hdr->offset != io_u->verify_offset + hdr_num * td->o.verify_interval) {
937                 log_err("verify: bad header offset %"PRIu64
938                         ", wanted %llu",
939                         hdr->offset, io_u->verify_offset);
940                 goto err;
941         }
942
943         /*
944          * For read-only workloads, the program cannot be certain of the
945          * last numberio written to a block. Checking of numberio will be
946          * done only for workloads that write data.  For verify_only or
947          * any mode de-selecting verify_write_sequence, numberio check is
948          * skipped.
949          */
950         if (td_write(td) && (td_min_bs(td) == td_max_bs(td)) &&
951             !td->o.time_based)
952                 if (td->o.verify_write_sequence)
953                         if (hdr->numberio != io_u->numberio) {
954                                 log_err("verify: bad header numberio %"PRIu16
955                                         ", wanted %"PRIu16,
956                                         hdr->numberio, io_u->numberio);
957                                 goto err;
958                         }
959
960         crc = fio_crc32c(p, offsetof(struct verify_header, crc32));
961         if (crc != hdr->crc32) {
962                 log_err("verify: bad header crc %x, calculated %x",
963                         hdr->crc32, crc);
964                 goto err;
965         }
966         return 0;
967
968 err:
969         log_err(" at file %s offset %llu, length %u"
970                 " (requested block: offset=%llu, length=%llu)\n",
971                 io_u->file->file_name,
972                 io_u->verify_offset + hdr_num * hdr_len, hdr_len,
973                 io_u->verify_offset, io_u->buflen);
974
975         if (td->o.verify_dump)
976                 dump_buf(p, hdr_len, io_u->verify_offset + hdr_num * hdr_len,
977                                 "hdr_fail", io_u->file);
978
979         return EILSEQ;
980 }
981
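/*
 * Main verify entry point for a completed read. Trivially succeeds for
 * VERIFY_NULL, non-read io_us, faked IO engines, and data already verified
 * in the device; trimmed blocks are checked for zeroes instead. Otherwise
 * the buffer is walked in hdr_inc steps: the header is swapped back if
 * verify_offset is set, verified (unless VERIFY_PATTERN_NO_HDR), and the
 * region is dispatched to the matching checksum or pattern verifier.
 */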
982 int verify_io_u(struct thread_data *td, struct io_u **io_u_ptr)
983 {
984         struct verify_header *hdr;
985         struct io_u *io_u = *io_u_ptr;
986         unsigned int header_size, hdr_inc, hdr_num = 0;
987         void *p;
988         int ret;
989
990         if (td->o.verify == VERIFY_NULL || io_u->ddir != DDIR_READ)
991                 return 0;
992         /*
993          * If the IO engine is faking IO (like null), then just pretend
994          * we verified everything.
995          */
996         if (td_ioengine_flagged(td, FIO_FAKEIO))
997                 return 0;
998
999         /*
1000          * If data has already been verified from the device, we can skip
1001          * the actual verification phase here.
1002          */
1003         if (io_u->flags & IO_U_F_VER_IN_DEV)
1004                 return 0;
1005
1006         if (io_u->flags & IO_U_F_TRIMMED) {
1007                 ret = verify_trimmed_io_u(td, io_u);
1008                 goto done;
1009         }
1010
1011         hdr_inc = get_hdr_inc(td, io_u);
1012
1013         ret = 0;
1014         for (p = io_u->buf; p < io_u->buf + io_u->buflen;
1015              p += hdr_inc, hdr_num++) {
1016                 struct vcont vc = {
1017                         .io_u           = io_u,
1018                         .hdr_num        = hdr_num,
1019                         .td             = td,
1020                 };
1021                 unsigned int verify_type;
1022
1023                 if (ret && td->o.verify_fatal)
1024                         break;
1025
1026                 header_size = __hdr_size(td->o.verify);
1027                 if (td->o.verify_offset)
1028                         memswp(p, p + td->o.verify_offset, header_size);
1029                 hdr = p;
1030
1031                 if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
1032                         ret = verify_header(io_u, td, hdr, hdr_num, hdr_inc);
1033                         if (ret)
1034                                 return ret;
1035                 }
1036
1037                 if (td->o.verify != VERIFY_NONE)
1038                         verify_type = td->o.verify;
1039                 else
1040                         verify_type = hdr->verify_type;
1041
1042                 switch (verify_type) {
1043                 case VERIFY_HDR_ONLY:
1044                         /* The header has already been verified; check
1045                          * whether a pattern is left to verify. */
1046                         if (td->o.verify_pattern_bytes)
1047                                 ret = verify_io_u_pattern(hdr, &vc);
1048                         break;
1049                 case VERIFY_MD5:
1050                         ret = verify_io_u_md5(hdr, &vc);
1051                         break;
1052                 case VERIFY_CRC64:
1053                         ret = verify_io_u_crc64(hdr, &vc);
1054                         break;
1055                 case VERIFY_CRC32C:
1056                 case VERIFY_CRC32C_INTEL:
1057                         ret = verify_io_u_crc32c(hdr, &vc);
1058                         break;
1059                 case VERIFY_CRC32:
1060                         ret = verify_io_u_crc32(hdr, &vc);
1061                         break;
1062                 case VERIFY_CRC16:
1063                         ret = verify_io_u_crc16(hdr, &vc);
1064                         break;
1065                 case VERIFY_CRC7:
1066                         ret = verify_io_u_crc7(hdr, &vc);
1067                         break;
1068                 case VERIFY_SHA256:
1069                         ret = verify_io_u_sha256(hdr, &vc);
1070                         break;
1071                 case VERIFY_SHA512:
1072                         ret = verify_io_u_sha512(hdr, &vc);
1073                         break;
1074                 case VERIFY_SHA3_224:
1075                         ret = verify_io_u_sha3_224(hdr, &vc);
1076                         break;
1077                 case VERIFY_SHA3_256:
1078                         ret = verify_io_u_sha3_256(hdr, &vc);
1079                         break;
1080                 case VERIFY_SHA3_384:
1081                         ret = verify_io_u_sha3_384(hdr, &vc);
1082                         break;
1083                 case VERIFY_SHA3_512:
1084                         ret = verify_io_u_sha3_512(hdr, &vc);
1085                         break;
1086                 case VERIFY_XXHASH:
1087                         ret = verify_io_u_xxhash(hdr, &vc);
1088                         break;
1089                 case VERIFY_SHA1:
1090                         ret = verify_io_u_sha1(hdr, &vc);
1091                         break;
1092                 case VERIFY_PATTERN:
1093                 case VERIFY_PATTERN_NO_HDR:
1094                         ret = verify_io_u_pattern(hdr, &vc);
1095                         break;
1096                 default:
1097                         log_err("Bad verify type %u\n", hdr->verify_type);
1098                         ret = EINVAL;
1099                 }
1100
1101                 if (ret && verify_type != hdr->verify_type && verify_type != VERIFY_PATTERN_NO_HDR)
1102                         log_err("fio: verify type mismatch (%u media, %u given)\n",
1103                                         hdr->verify_type, verify_type);
1104         }
1105
1106 done:
1107         if (ret && td->o.verify_fatal)
1108                 fio_mark_td_terminate(td);
1109
1110         return ret;
1111 }
1112
1113 static void fill_xxhash(struct verify_header *hdr, void *p, unsigned int len)
1114 {
1115         struct vhdr_xxhash *vh = hdr_priv(hdr);
1116         void *state;
1117
1118         state = XXH32_init(1);
1119         XXH32_update(state, p, len);
1120         vh->hash = XXH32_digest(state);
1121 }
1122
1123 static void fill_sha3(struct fio_sha3_ctx *sha3_ctx, void *p, unsigned int len)
1124 {
1125         fio_sha3_update(sha3_ctx, p, len);
1126         fio_sha3_final(sha3_ctx);
1127 }
1128
1129 static void fill_sha3_224(struct verify_header *hdr, void *p, unsigned int len)
1130 {
1131         struct vhdr_sha3_224 *vh = hdr_priv(hdr);
1132         struct fio_sha3_ctx sha3_ctx = {
1133                 .sha = vh->sha,
1134         };
1135
1136         fio_sha3_224_init(&sha3_ctx);
1137         fill_sha3(&sha3_ctx, p, len);
1138 }
1139
1140 static void fill_sha3_256(struct verify_header *hdr, void *p, unsigned int len)
1141 {
1142         struct vhdr_sha3_256 *vh = hdr_priv(hdr);
1143         struct fio_sha3_ctx sha3_ctx = {
1144                 .sha = vh->sha,
1145         };
1146
1147         fio_sha3_256_init(&sha3_ctx);
1148         fill_sha3(&sha3_ctx, p, len);
1149 }
1150
1151 static void fill_sha3_384(struct verify_header *hdr, void *p, unsigned int len)
1152 {
1153         struct vhdr_sha3_384 *vh = hdr_priv(hdr);
1154         struct fio_sha3_ctx sha3_ctx = {
1155                 .sha = vh->sha,
1156         };
1157
1158         fio_sha3_384_init(&sha3_ctx);
1159         fill_sha3(&sha3_ctx, p, len);
1160 }
1161
1162 static void fill_sha3_512(struct verify_header *hdr, void *p, unsigned int len)
1163 {
1164         struct vhdr_sha3_512 *vh = hdr_priv(hdr);
1165         struct fio_sha3_ctx sha3_ctx = {
1166                 .sha = vh->sha,
1167         };
1168
1169         fio_sha3_512_init(&sha3_ctx);
1170         fill_sha3(&sha3_ctx, p, len);
1171 }
1172
1173 static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
1174 {
1175         struct vhdr_sha512 *vh = hdr_priv(hdr);
1176         struct fio_sha512_ctx sha512_ctx = {
1177                 .buf = vh->sha512,
1178         };
1179
1180         fio_sha512_init(&sha512_ctx);
1181         fio_sha512_update(&sha512_ctx, p, len);
1182         fio_sha512_final(&sha512_ctx);
1183 }
1184
1185 static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
1186 {
1187         struct vhdr_sha256 *vh = hdr_priv(hdr);
1188         struct fio_sha256_ctx sha256_ctx = {
1189                 .buf = vh->sha256,
1190         };
1191
1192         fio_sha256_init(&sha256_ctx);
1193         fio_sha256_update(&sha256_ctx, p, len);
1194         fio_sha256_final(&sha256_ctx);
1195 }
1196
1197 static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
1198 {
1199         struct vhdr_sha1 *vh = hdr_priv(hdr);
1200         struct fio_sha1_ctx sha1_ctx = {
1201                 .H = vh->sha1,
1202         };
1203
1204         fio_sha1_init(&sha1_ctx);
1205         fio_sha1_update(&sha1_ctx, p, len);
1206         fio_sha1_final(&sha1_ctx);
1207 }
1208
1209 static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
1210 {
1211         struct vhdr_crc7 *vh = hdr_priv(hdr);
1212
1213         vh->crc7 = fio_crc7(p, len);
1214 }
1215
1216 static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
1217 {
1218         struct vhdr_crc16 *vh = hdr_priv(hdr);
1219
1220         vh->crc16 = fio_crc16(p, len);
1221 }
1222
1223 static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
1224 {
1225         struct vhdr_crc32 *vh = hdr_priv(hdr);
1226
1227         vh->crc32 = fio_crc32(p, len);
1228 }
1229
1230 static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
1231 {
1232         struct vhdr_crc32 *vh = hdr_priv(hdr);
1233
1234         vh->crc32 = fio_crc32c(p, len);
1235 }
1236
1237 static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
1238 {
1239         struct vhdr_crc64 *vh = hdr_priv(hdr);
1240
1241         vh->crc64 = fio_crc64(p, len);
1242 }
1243
1244 static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
1245 {
1246         struct vhdr_md5 *vh = hdr_priv(hdr);
1247         struct fio_md5_ctx md5_ctx = {
1248                 .hash = (uint32_t *) vh->md5_digest,
1249         };
1250
1251         fio_md5_init(&md5_ctx);
1252         fio_md5_update(&md5_ctx, p, len);
1253         fio_md5_final(&md5_ctx);
1254 }
1255
1256 static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
1257                        struct verify_header *hdr, unsigned int header_num,
1258                        unsigned int header_len, uint64_t rand_seed)
1259 {
1260         void *p = hdr;
1261
1262         hdr->magic = FIO_HDR_MAGIC;
1263         hdr->verify_type = td->o.verify;
1264         hdr->len = header_len;
1265         hdr->rand_seed = rand_seed;
1266         hdr->offset = io_u->verify_offset + header_num * td->o.verify_interval;
1267         hdr->time_sec = io_u->start_time.tv_sec;
1268         hdr->time_nsec = io_u->start_time.tv_nsec;
1269         hdr->thread = td->thread_number;
1270         hdr->numberio = io_u->numberio;
1271         hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
1272 }
1273
1274
1275 static void fill_hdr(struct thread_data *td, struct io_u *io_u,
1276                      struct verify_header *hdr, unsigned int header_num,
1277                      unsigned int header_len, uint64_t rand_seed)
1278 {
1279         if (td->o.verify != VERIFY_PATTERN_NO_HDR)
1280                 __fill_hdr(td, io_u, hdr, header_num, header_len, rand_seed);
1281 }
1282
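/*
 * Fill one region's header and checksum: write the verify_header (skipped
 * for VERIFY_PATTERN_NO_HDR), compute the configured checksum over the data
 * area that follows it, and finally swap the header out to verify_offset if
 * requested.
 */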
1283 static void populate_hdr(struct thread_data *td, struct io_u *io_u,
1284                          struct verify_header *hdr, unsigned int header_num,
1285                          unsigned int header_len)
1286 {
1287         unsigned int data_len;
1288         void *data;
1289         char *p;
1290
1291         p = (char *) hdr;
1292
1293         fill_hdr(td, io_u, hdr, header_num, header_len, io_u->rand_seed);
1294
1295         if (header_len <= hdr_size(td, hdr)) {
1296                 td_verror(td, EINVAL, "Blocksize too small");
1297                 return;
1298         }
1299         data_len = header_len - hdr_size(td, hdr);
1300
1301         data = p + hdr_size(td, hdr);
1302         switch (td->o.verify) {
1303         case VERIFY_MD5:
1304                 dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
1305                                                 io_u, hdr->len);
1306                 fill_md5(hdr, data, data_len);
1307                 break;
1308         case VERIFY_CRC64:
1309                 dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
1310                                                 io_u, hdr->len);
1311                 fill_crc64(hdr, data, data_len);
1312                 break;
1313         case VERIFY_CRC32C:
1314         case VERIFY_CRC32C_INTEL:
1315                 dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
1316                                                 io_u, hdr->len);
1317                 fill_crc32c(hdr, data, data_len);
1318                 break;
1319         case VERIFY_CRC32:
1320                 dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
1321                                                 io_u, hdr->len);
1322                 fill_crc32(hdr, data, data_len);
1323                 break;
1324         case VERIFY_CRC16:
1325                 dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
1326                                                 io_u, hdr->len);
1327                 fill_crc16(hdr, data, data_len);
1328                 break;
1329         case VERIFY_CRC7:
1330                 dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
1331                                                 io_u, hdr->len);
1332                 fill_crc7(hdr, data, data_len);
1333                 break;
1334         case VERIFY_SHA256:
1335                 dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
1336                                                 io_u, hdr->len);
1337                 fill_sha256(hdr, data, data_len);
1338                 break;
1339         case VERIFY_SHA512:
1340                 dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
1341                                                 io_u, hdr->len);
1342                 fill_sha512(hdr, data, data_len);
1343                 break;
1344         case VERIFY_SHA3_224:
1345                 dprint(FD_VERIFY, "fill sha3-224 io_u %p, len %u\n",
1346                                                 io_u, hdr->len);
1347                 fill_sha3_224(hdr, data, data_len);
1348                 break;
1349         case VERIFY_SHA3_256:
1350                 dprint(FD_VERIFY, "fill sha3-256 io_u %p, len %u\n",
1351                                                 io_u, hdr->len);
1352                 fill_sha3_256(hdr, data, data_len);
1353                 break;
1354         case VERIFY_SHA3_384:
1355                 dprint(FD_VERIFY, "fill sha3-384 io_u %p, len %u\n",
1356                                                 io_u, hdr->len);
1357                 fill_sha3_384(hdr, data, data_len);
1358                 break;
1359         case VERIFY_SHA3_512:
1360                 dprint(FD_VERIFY, "fill sha3-512 io_u %p, len %u\n",
1361                                                 io_u, hdr->len);
1362                 fill_sha3_512(hdr, data, data_len);
1363                 break;
1364         case VERIFY_XXHASH:
1365                 dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
1366                                                 io_u, hdr->len);
1367                 fill_xxhash(hdr, data, data_len);
1368                 break;
1369         case VERIFY_SHA1:
1370                 dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
1371                                                 io_u, hdr->len);
1372                 fill_sha1(hdr, data, data_len);
1373                 break;
1374         case VERIFY_HDR_ONLY:
1375         case VERIFY_PATTERN:
1376         case VERIFY_PATTERN_NO_HDR:
1377                 /* nothing to do here */
1378                 break;
1379         default:
1380                 log_err("fio: bad verify type: %d\n", td->o.verify);
1381                 assert(0);
1382         }
1383
1384         if (td->o.verify_offset && hdr_size(td, hdr))
1385                 memswp(p, p + td->o.verify_offset, hdr_size(td, hdr));
1386 }
1387
1388 /*
1389  * fill body of io_u->buf with random data and add a header with the
1390  * checksum of choice
1391  */
1392 void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
1393 {
1394         if (td->o.verify == VERIFY_NULL)
1395                 return;
1396
1397         fill_pattern_headers(td, io_u, 0, 0);
1398 }
1399
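/*
 * Pick the next completed write to verify, either from the offset-sorted
 * rb-tree or from the FIFO history list, skipping entries whose IO is still
 * in flight. The io_piece is consumed and turned into a DDIR_READ io_u
 * against the same file, offset and length.
 */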
1400 int get_next_verify(struct thread_data *td, struct io_u *io_u)
1401 {
1402         struct io_piece *ipo = NULL;
1403
1404         /*
1405          * this io_u is from a requeue, we already filled the offsets
1406          */
1407         if (io_u->file)
1408                 return 0;
1409
1410         if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
1411                 struct fio_rb_node *n = rb_first(&td->io_hist_tree);
1412
1413                 ipo = rb_entry(n, struct io_piece, rb_node);
1414
1415                 /*
1416                  * Ensure that the associated IO has completed
1417                  */
1418                 if (atomic_load_acquire(&ipo->flags) & IP_F_IN_FLIGHT)
1419                         goto nothing;
1420
1421                 rb_erase(n, &td->io_hist_tree);
1422                 assert(ipo->flags & IP_F_ONRB);
1423                 ipo->flags &= ~IP_F_ONRB;
1424         } else if (!flist_empty(&td->io_hist_list)) {
1425                 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
1426
1427                 /*
1428                  * Ensure that the associated IO has completed
1429                  */
1430                 if (atomic_load_acquire(&ipo->flags) & IP_F_IN_FLIGHT)
1431                         goto nothing;
1432
1433                 flist_del(&ipo->list);
1434                 assert(ipo->flags & IP_F_ONLIST);
1435                 ipo->flags &= ~IP_F_ONLIST;
1436         }
1437
1438         if (ipo) {
1439                 td->io_hist_len--;
1440
1441                 io_u->offset = ipo->offset;
1442                 io_u->verify_offset = ipo->offset;
1443                 io_u->buflen = ipo->len;
1444                 io_u->numberio = ipo->numberio;
1445                 io_u->file = ipo->file;
1446                 io_u_set(td, io_u, IO_U_F_VER_LIST);
1447
1448                 if (ipo->flags & IP_F_TRIMMED)
1449                         io_u_set(td, io_u, IO_U_F_TRIMMED);
1450
1451                 if (!fio_file_open(io_u->file)) {
1452                         int r = td_io_open_file(td, io_u->file);
1453
1454                         if (r) {
1455                                 dprint(FD_VERIFY, "failed file %s open\n",
1456                                                 io_u->file->file_name);
1457                                 return 1;
1458                         }
1459                 }
1460
1461                 get_file(ipo->file);
1462                 assert(fio_file_open(io_u->file));
1463                 io_u->ddir = DDIR_READ;
1464                 io_u->xfer_buf = io_u->buf;
1465                 io_u->xfer_buflen = io_u->buflen;
1466
1467                 remove_trim_entry(td, ipo);
1468                 free(ipo);
1469                 dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);
1470
1471                 if (!td->o.verify_pattern_bytes) {
1472                         io_u->rand_seed = __rand(&td->verify_state);
1473                         if (sizeof(int) != sizeof(long *))
1474                                 io_u->rand_seed *= __rand(&td->verify_state);
1475                 }
1476                 return 0;
1477         }
1478
1479 nothing:
1480         dprint(FD_VERIFY, "get_next_verify: empty\n");
1481         return 1;
1482 }
1483
1484 void fio_verify_init(struct thread_data *td)
1485 {
1486         if (td->o.verify == VERIFY_CRC32C_INTEL ||
1487             td->o.verify == VERIFY_CRC32C) {
1488                 crc32c_arm64_probe();
1489                 crc32c_intel_probe();
1490         }
1491 }
1492
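/*
 * Worker loop for verify_async threads: optionally pin to verify_cpumask,
 * then wait on verify_cond, splice the pending verify_list and verify each
 * io_u. Errors configured as non-fatal for verification are counted and
 * cleared so the worker keeps running; anything else terminates the loop.
 */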
static void *verify_async_thread(void *data)
{
        struct thread_data *td = data;
        struct io_u *io_u;
        int ret = 0;

        if (fio_option_is_set(&td->o, verify_cpumask) &&
            fio_setaffinity(td->pid, td->o.verify_cpumask)) {
                log_err("fio: failed setting verify thread affinity\n");
                goto done;
        }

        do {
                FLIST_HEAD(list);

                read_barrier();
                if (td->verify_thread_exit)
                        break;

                pthread_mutex_lock(&td->io_u_lock);

                while (flist_empty(&td->verify_list) &&
                       !td->verify_thread_exit) {
                        ret = pthread_cond_wait(&td->verify_cond,
                                                        &td->io_u_lock);
                        if (ret) {
                                break;
                        }
                }

                flist_splice_init(&td->verify_list, &list);
                pthread_mutex_unlock(&td->io_u_lock);

                if (flist_empty(&list))
                        continue;

                while (!flist_empty(&list)) {
                        io_u = flist_first_entry(&list, struct io_u, verify_list);
                        flist_del_init(&io_u->verify_list);

                        io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
                        ret = verify_io_u(td, &io_u);

                        put_io_u(td, io_u);
                        if (!ret)
                                continue;
                        if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
                                update_error_count(td, ret);
                                td_clear_error(td);
                                ret = 0;
                        }
                }
        } while (!ret);

        if (ret) {
                td_verror(td, ret, "async_verify");
                if (td->o.verify_fatal)
                        fio_mark_td_terminate(td);
        }

done:
        pthread_mutex_lock(&td->io_u_lock);
        td->nr_verify_threads--;
        pthread_cond_signal(&td->free_cond);
        pthread_mutex_unlock(&td->io_u_lock);

        return NULL;
}

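/*
 * Start the detached verify_async worker threads. If any of them fails to
 * start, wake the ones that did, flag them to exit and return failure.
 */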
int verify_async_init(struct thread_data *td)
{
        int i, ret;
        pthread_attr_t attr;

        pthread_attr_init(&attr);
        pthread_attr_setstacksize(&attr, 2 * PTHREAD_STACK_MIN);

        td->verify_thread_exit = 0;

        td->verify_threads = malloc(sizeof(pthread_t) * td->o.verify_async);
        for (i = 0; i < td->o.verify_async; i++) {
                ret = pthread_create(&td->verify_threads[i], &attr,
                                        verify_async_thread, td);
                if (ret) {
                        log_err("fio: async verify creation failed: %s\n",
                                        strerror(ret));
                        break;
                }
                ret = pthread_detach(td->verify_threads[i]);
                if (ret) {
                        log_err("fio: async verify thread detach failed: %s\n",
                                        strerror(ret));
                        break;
                }
                td->nr_verify_threads++;
        }

        pthread_attr_destroy(&attr);

        if (i != td->o.verify_async) {
                log_err("fio: only %d verify threads started, exiting\n", i);

                pthread_mutex_lock(&td->io_u_lock);
                td->verify_thread_exit = 1;
                pthread_cond_broadcast(&td->verify_cond);
                pthread_mutex_unlock(&td->io_u_lock);

                return 1;
        }

        return 0;
}

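/*
 * Tell the async verify threads to exit and wait for all of them to drop
 * off before freeing the thread array.
 */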
void verify_async_exit(struct thread_data *td)
{
        pthread_mutex_lock(&td->io_u_lock);
        td->verify_thread_exit = 1;
        pthread_cond_broadcast(&td->verify_cond);

        while (td->nr_verify_threads)
                pthread_cond_wait(&td->free_cond, &td->io_u_lock);

        pthread_mutex_unlock(&td->io_u_lock);
        free(td->verify_threads);
        td->verify_threads = NULL;
}

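/*
 * Pattern format callback: paste the io_u offset, as little endian, into
 * the pattern buffer. At most sizeof(off) bytes are copied.
 */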
int paste_blockoff(char *buf, unsigned int len, void *priv)
{
        struct io_u *io = priv;
        unsigned long long off;

        typecheck(__typeof__(off), io->offset);
        off = cpu_to_le64((uint64_t)io->offset);
        len = min(len, (unsigned int)sizeof(off));
        memcpy(buf, &off, len);
        return 0;
}

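/*
 * Record the most recent write completions for this file in the
 * thread_io_list, walking f->last_write_comp backwards from the last
 * stored index. Returns the number of completions added.
 */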
static int __fill_file_completions(struct thread_data *td,
                                   struct thread_io_list *s,
                                   struct fio_file *f, unsigned int *index)
{
        unsigned int comps;
        int i, j;

        if (!f->last_write_comp)
                return 0;

        if (td->io_blocks[DDIR_WRITE] < td->last_write_comp_depth)
                comps = td->io_blocks[DDIR_WRITE];
        else
                comps = td->last_write_comp_depth;

        j = f->last_write_idx - 1;
        for (i = 0; i < comps; i++) {
                if (j == -1)
                        j = td->last_write_comp_depth - 1;
                s->comps[*index].fileno = __cpu_to_le64(f->fileno);
                s->comps[*index].offset = cpu_to_le64(f->last_write_comp[j]);
                (*index)++;
                j--;
        }

        return comps;
}

static int fill_file_completions(struct thread_data *td,
                                 struct thread_io_list *s, unsigned int *index)
{
        struct fio_file *f;
        unsigned int i;
        int comps = 0;

        for_each_file(td, f, i)
                comps += __fill_file_completions(td, s, f, index);

        return comps;
}

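/*
 * Build a verify state snapshot for every thread selected by save_mask
 * (or all threads for IO_LIST_ALL): recent write completions, the number
 * of issued writes and the random generator state, all stored little
 * endian. The caller owns the returned allocation.
 */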
struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
{
        struct all_io_list *rep;
        size_t depth;
        void *next;
        int nr;

        compiletime_assert(sizeof(struct all_io_list) == 8, "all_io_list");

        /*
         * Calculate reply space needed. We need one 'io_state' per thread,
         * and the size will vary depending on depth.
         */
        depth = 0;
        nr = 0;
        for_each_td(td) {
                if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
                        continue;
                td->stop_io = 1;
                td->flags |= TD_F_VSTATE_SAVED;
                depth += (td->last_write_comp_depth * td->o.nr_files);
                nr++;
        } end_for_each();

        if (!nr)
                return NULL;

        *sz = sizeof(*rep);
        *sz += nr * sizeof(struct thread_io_list);
        *sz += depth * sizeof(struct file_comp);
        rep = calloc(1, *sz);

        rep->threads = cpu_to_le64((uint64_t) nr);

        next = &rep->state[0];
        for_each_td(td) {
                struct thread_io_list *s = next;
                unsigned int comps, index = 0;

                if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
                        continue;

                comps = fill_file_completions(td, s, &index);

                s->no_comps = cpu_to_le64((uint64_t) comps);
                s->depth = cpu_to_le32((uint32_t) td->o.iodepth);
                s->max_no_comps_per_file = cpu_to_le32((uint32_t) td->last_write_comp_depth);
                s->nofiles = cpu_to_le32((uint32_t) td->o.nr_files);
                s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
                s->index = cpu_to_le64((uint64_t) __td_index);
                if (td->random_state.use64) {
                        s->rand.state64.s[0] = cpu_to_le64(td->random_state.state64.s1);
                        s->rand.state64.s[1] = cpu_to_le64(td->random_state.state64.s2);
                        s->rand.state64.s[2] = cpu_to_le64(td->random_state.state64.s3);
                        s->rand.state64.s[3] = cpu_to_le64(td->random_state.state64.s4);
                        s->rand.state64.s[4] = cpu_to_le64(td->random_state.state64.s5);
                        s->rand.state64.s[5] = 0;
                        s->rand.use64 = cpu_to_le64((uint64_t)1);
                } else {
                        s->rand.state32.s[0] = cpu_to_le32(td->random_state.state32.s1);
                        s->rand.state32.s[1] = cpu_to_le32(td->random_state.state32.s2);
                        s->rand.state32.s[2] = cpu_to_le32(td->random_state.state32.s3);
                        s->rand.state32.s[3] = 0;
                        s->rand.use64 = 0;
                }
                snprintf((char *) s->name, sizeof(s->name), "%s", td->o.name);
                next = io_list_next(s);
        } end_for_each();

        return rep;
}

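/*
 * Open the per-thread state file, named from the job name, prefix and
 * thread number, for either writing a new state blob or reading one back.
 */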
static int open_state_file(const char *name, const char *prefix, int num,
                           int for_write)
{
        char out[PATH_MAX];
        int flags;
        int fd;

        if (for_write)
                flags = O_CREAT | O_TRUNC | O_WRONLY | O_SYNC;
        else
                flags = O_RDONLY;

#ifdef _WIN32
        flags |= O_BINARY;
#endif

        verify_state_gen_name(out, sizeof(out), name, prefix, num);

        fd = open(out, flags, 0644);
        if (fd == -1) {
                perror("fio: open state file");
                log_err("fio: state file: %s (for_write=%d)\n", out, for_write);
                return -1;
        }

        return fd;
}

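/*
 * Write one thread's verify state to disk: a small header carrying the
 * version, payload size and crc32c, followed by the thread_io_list itself.
 */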
static int write_thread_list_state(struct thread_io_list *s,
                                   const char *prefix)
{
        struct verify_state_hdr hdr;
        uint64_t crc;
        ssize_t ret;
        int fd;

        fd = open_state_file((const char *) s->name, prefix, s->index, 1);
        if (fd == -1)
                return 1;

        crc = fio_crc32c((void *)s, thread_io_list_sz(s));

        hdr.version = cpu_to_le64((uint64_t) VSTATE_HDR_VERSION);
        hdr.size = cpu_to_le64((uint64_t) thread_io_list_sz(s));
        hdr.crc = cpu_to_le64(crc);
        ret = write(fd, &hdr, sizeof(hdr));
        if (ret != sizeof(hdr))
                goto write_fail;

        ret = write(fd, s, thread_io_list_sz(s));
        if (ret != thread_io_list_sz(s)) {
write_fail:
                if (ret < 0)
                        perror("fio: write state file");
                log_err("fio: failed to write state file\n");
                ret = 1;
        } else
                ret = 0;

        close(fd);
        return ret;
}

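/*
 * Write out each per-thread state contained in an all_io_list snapshot.
 */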
void __verify_save_state(struct all_io_list *state, const char *prefix)
{
        struct thread_io_list *s = &state->state[0];
        unsigned int i;

        for (i = 0; i < le64_to_cpu(state->threads); i++) {
                write_thread_list_state(s, prefix);
                s = io_list_next(s);
        }
}

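/*
 * Snapshot the requested threads and save the state files with the "local"
 * prefix, placed under aux_path when that is set.
 */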
void verify_save_state(int mask)
{
        struct all_io_list *state;
        size_t sz;

        state = get_all_io_list(mask, &sz);
        if (state) {
                char prefix[PATH_MAX];

                if (aux_path)
                        sprintf(prefix, "%s%clocal", aux_path, FIO_OS_PATH_SEPARATOR);
                else
                        strcpy(prefix, "local");

                __verify_save_state(state, prefix);
                free(state);
        }
}

void verify_free_state(struct thread_data *td)
{
        if (td->vstate)
                free(td->vstate);
}

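/*
 * Convert a loaded little endian state blob to host byte order and attach
 * it to the thread. The thread takes ownership of the memory, which
 * verify_free_state() releases.
 */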
void verify_assign_state(struct thread_data *td, void *p)
{
        struct thread_io_list *s = p;
        int i;

        s->no_comps = le64_to_cpu(s->no_comps);
        s->depth = le32_to_cpu(s->depth);
        s->max_no_comps_per_file = le32_to_cpu(s->max_no_comps_per_file);
        s->nofiles = le32_to_cpu(s->nofiles);
        s->numberio = le64_to_cpu(s->numberio);
        s->rand.use64 = le64_to_cpu(s->rand.use64);

        if (s->rand.use64) {
                for (i = 0; i < 6; i++)
                        s->rand.state64.s[i] = le64_to_cpu(s->rand.state64.s[i]);
        } else {
                for (i = 0; i < 4; i++)
                        s->rand.state32.s[i] = le32_to_cpu(s->rand.state32.s[i]);
        }

        for (i = 0; i < s->no_comps; i++) {
                s->comps[i].fileno = le64_to_cpu(s->comps[i].fileno);
                s->comps[i].offset = le64_to_cpu(s->comps[i].offset);
        }

        td->vstate = p;
}

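/*
 * Validate a verify state header and its payload: byte swap the header,
 * then check the version and the crc32c of the payload. Returns 0 on
 * success, 1 on mismatch.
 */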
int verify_state_hdr(struct verify_state_hdr *hdr, struct thread_io_list *s)
{
        uint64_t crc;

        hdr->version = le64_to_cpu(hdr->version);
        hdr->size = le64_to_cpu(hdr->size);
        hdr->crc = le64_to_cpu(hdr->crc);

        if (hdr->version != VSTATE_HDR_VERSION)
                return 1;

        crc = fio_crc32c((void *)s, hdr->size);
        if (crc != hdr->crc)
                return 1;

        return 0;
}

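/*
 * Load a previously saved verify state for this thread, if td->o.verify_state
 * is set: read and sanity check the header, read the payload, check its
 * crc32c and hand the result to verify_assign_state().
 */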
int verify_load_state(struct thread_data *td, const char *prefix)
{
        struct verify_state_hdr hdr;
        void *s = NULL;
        uint64_t crc;
        ssize_t ret;
        int fd;

        if (!td->o.verify_state)
                return 0;

        fd = open_state_file(td->o.name, prefix, td->thread_number - 1, 0);
        if (fd == -1)
                return 1;

        ret = read(fd, &hdr, sizeof(hdr));
        if (ret != sizeof(hdr)) {
                if (ret < 0)
                        td_verror(td, errno, "read verify state hdr");
                log_err("fio: failed reading verify state header\n");
                goto err;
        }

        hdr.version = le64_to_cpu(hdr.version);
        hdr.size = le64_to_cpu(hdr.size);
        hdr.crc = le64_to_cpu(hdr.crc);

        if (hdr.version != VSTATE_HDR_VERSION) {
                log_err("fio: unsupported (%d) version in verify state header\n",
                                (unsigned int) hdr.version);
                goto err;
        }

        s = malloc(hdr.size);
        ret = read(fd, s, hdr.size);
        if (ret != hdr.size) {
                if (ret < 0)
                        td_verror(td, errno, "read verify state");
                log_err("fio: failed reading verify state\n");
                goto err;
        }

        crc = fio_crc32c(s, hdr.size);
        if (crc != hdr.crc) {
                log_err("fio: verify state is corrupt\n");
                goto err;
        }

        close(fd);

        verify_assign_state(td, s);
        return 0;
err:
        if (s)
                free(s);
        close(fd);
        return 1;
}

/*
 * Use the loaded verify state to know when to stop doing verification
 */
int verify_state_should_stop(struct thread_data *td, struct io_u *io_u)
{
        struct thread_io_list *s = td->vstate;
        struct fio_file *f = io_u->file;
        int i;

        if (!s || !f)
                return 0;

        /*
         * If we're not yet into the window that starts at (issues - depth),
         * continue. If fewer blocks were issued than the depth, always do
         * the check.
         */
        if ((td->io_blocks[DDIR_READ] < s->depth ||
            s->numberio - td->io_blocks[DDIR_READ] > s->depth) &&
            s->numberio > s->depth)
                return 0;

        /*
         * We're in the window where we have to check whether this IO was
         * completed or not. If the IO was seen as completed, then let's
         * verify it.
         */
        for (i = 0; i < s->no_comps; i++) {
                if (s->comps[i].fileno != f->fileno)
                        continue;
                if (io_u->verify_offset == s->comps[i].offset)
                        return 0;
        }

        /*
         * Not found, we have to stop
         */
        log_info("Stop verify because offset %llu in %s is not recorded in verify state\n",
                 io_u->verify_offset, f->file_name);
        return 1;
}