verify: fix potential overflow before widen
[fio.git] / verify.c
1 /*
2  * IO verification helpers
3  */
4 #include <unistd.h>
5 #include <fcntl.h>
6 #include <string.h>
7 #include <assert.h>
8 #include <pthread.h>
9 #include <libgen.h>
10
11 #include "arch/arch.h"
12 #include "fio.h"
13 #include "verify.h"
14 #include "trim.h"
15 #include "lib/rand.h"
16 #include "lib/hweight.h"
17 #include "lib/pattern.h"
18 #include "oslib/asprintf.h"
19
20 #include "crc/md5.h"
21 #include "crc/crc64.h"
22 #include "crc/crc32.h"
23 #include "crc/crc32c.h"
24 #include "crc/crc16.h"
25 #include "crc/crc7.h"
26 #include "crc/sha256.h"
27 #include "crc/sha512.h"
28 #include "crc/sha1.h"
29 #include "crc/xxhash.h"
30 #include "crc/sha3.h"
31
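/*
 * These helpers implement the verify= family of job options: writes are
 * stamped with per-interval headers and checksums, and reads are checked
 * against them. A minimal job exercising this path might look like
 * (illustrative):
 *
 *   [write-and-verify]
 *   rw=write
 *   bs=4k
 *   size=128m
 *   verify=crc32c
 *   verify_interval=4k
 */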
32 static void populate_hdr(struct thread_data *td, struct io_u *io_u,
33                          struct verify_header *hdr, unsigned int header_num,
34                          unsigned int header_len);
35 static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
36                        struct verify_header *hdr, unsigned int header_num,
37                        unsigned int header_len, uint64_t rand_seed);
38
39 void fill_buffer_pattern(struct thread_data *td, void *p, unsigned int len)
40 {
41         (void)cpy_pattern(td->o.buffer_pattern, td->o.buffer_pattern_bytes, p, len);
42 }
43
44 static void __fill_buffer(struct thread_options *o, uint64_t seed, void *p,
45                           unsigned int len)
46 {
47         __fill_random_buf_percentage(seed, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
48 }
49
50 void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
51                          struct io_u *io_u, uint64_t seed, int use_seed)
52 {
53         struct thread_options *o = &td->o;
54
55         if (!o->verify_pattern_bytes) {
56                 dprint(FD_VERIFY, "fill random bytes len=%u\n", len);
57
58                 if (!use_seed) {
59                         seed = __rand(&td->verify_state);
60                         if (sizeof(int) != sizeof(long *))
61                                 seed *= (unsigned long)__rand(&td->verify_state);
62                 }
63                 io_u->rand_seed = seed;
64                 __fill_buffer(o, seed, p, len);
65                 return;
66         }
67
68         /* Skip if the buffer was already filled and we do not need to
69          * patch the pattern with a format */
70         if (!td->o.verify_fmt_sz && io_u->buf_filled_len >= len) {
71                 dprint(FD_VERIFY, "using already filled verify pattern b=%d len=%u\n",
72                         o->verify_pattern_bytes, len);
73                 return;
74         }
75
76         (void)paste_format(td->o.verify_pattern, td->o.verify_pattern_bytes,
77                            td->o.verify_fmt, td->o.verify_fmt_sz,
78                            p, len, io_u);
79         io_u->buf_filled_len = len;
80 }
81
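/*
 * Illustrative example: with bs=8k, verify_interval=4k and aligned block
 * sizes, get_hdr_inc() returns 4k, so an 8k io_u is split into two verify
 * regions, each carrying its own header.
 */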
82 static unsigned int get_hdr_inc(struct thread_data *td, struct io_u *io_u)
83 {
84         unsigned int hdr_inc;
85
86         /*
87          * If we use bs_unaligned, buflen can be larger than the verify
88          * interval (which just defaults to the smallest blocksize possible).
89          */
90         hdr_inc = io_u->buflen;
91         if (td->o.verify_interval && td->o.verify_interval <= io_u->buflen &&
92             !td->o.bs_unaligned)
93                 hdr_inc = td->o.verify_interval;
94
95         return hdr_inc;
96 }
97
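/*
 * Layout written into each hdr_inc-sized region (sketch, for a checksummed
 * verify type; VERIFY_PATTERN_NO_HDR writes no header at all):
 *
 *   [struct verify_header][vhdr_<type>][ pattern / random data ........ ]
 *   |<----------------------- hdr_inc bytes --------------------------->|
 */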
98 static void fill_pattern_headers(struct thread_data *td, struct io_u *io_u,
99                                  uint64_t seed, int use_seed)
100 {
101         unsigned int hdr_inc, header_num;
102         struct verify_header *hdr;
103         void *p = io_u->buf;
104
105         fill_verify_pattern(td, p, io_u->buflen, io_u, seed, use_seed);
106
107         hdr_inc = get_hdr_inc(td, io_u);
108         header_num = 0;
109         for (; p < io_u->buf + io_u->buflen; p += hdr_inc) {
110                 hdr = p;
111                 populate_hdr(td, io_u, hdr, header_num, hdr_inc);
112                 header_num++;
113         }
114 }
115
116 static void memswp(void *buf1, void *buf2, unsigned int len)
117 {
118         char swap[200];
119
120         assert(len <= sizeof(swap));
121
122         memcpy(&swap, buf1, len);
123         memcpy(buf1, buf2, len);
124         memcpy(buf2, &swap, len);
125 }
126
127 static void hexdump(void *buffer, int len)
128 {
129         unsigned char *p = buffer;
130         int i;
131
132         for (i = 0; i < len; i++)
133                 log_err("%02x", p[i]);
134         log_err("\n");
135 }
136
137 /*
138  * Prepare for separation of verify_header and checksum header
139  */
140 static inline unsigned int __hdr_size(int verify_type)
141 {
142         unsigned int len = 0;
143
144         switch (verify_type) {
145         case VERIFY_NONE:
146         case VERIFY_HDR_ONLY:
147         case VERIFY_NULL:
148         case VERIFY_PATTERN:
149                 len = 0;
150                 break;
151         case VERIFY_MD5:
152                 len = sizeof(struct vhdr_md5);
153                 break;
154         case VERIFY_CRC64:
155                 len = sizeof(struct vhdr_crc64);
156                 break;
157         case VERIFY_CRC32C:
158         case VERIFY_CRC32:
159         case VERIFY_CRC32C_INTEL:
160                 len = sizeof(struct vhdr_crc32);
161                 break;
162         case VERIFY_CRC16:
163                 len = sizeof(struct vhdr_crc16);
164                 break;
165         case VERIFY_CRC7:
166                 len = sizeof(struct vhdr_crc7);
167                 break;
168         case VERIFY_SHA256:
169                 len = sizeof(struct vhdr_sha256);
170                 break;
171         case VERIFY_SHA512:
172                 len = sizeof(struct vhdr_sha512);
173                 break;
174         case VERIFY_SHA3_224:
175                 len = sizeof(struct vhdr_sha3_224);
176                 break;
177         case VERIFY_SHA3_256:
178                 len = sizeof(struct vhdr_sha3_256);
179                 break;
180         case VERIFY_SHA3_384:
181                 len = sizeof(struct vhdr_sha3_384);
182                 break;
183         case VERIFY_SHA3_512:
184                 len = sizeof(struct vhdr_sha3_512);
185                 break;
186         case VERIFY_XXHASH:
187                 len = sizeof(struct vhdr_xxhash);
188                 break;
189         case VERIFY_SHA1:
190                 len = sizeof(struct vhdr_sha1);
191                 break;
192         case VERIFY_PATTERN_NO_HDR:
193                 return 0;
194         default:
195                 log_err("fio: unknown verify header!\n");
196                 assert(0);
197         }
198
199         return len + sizeof(struct verify_header);
200 }
201
202 static inline unsigned int hdr_size(struct thread_data *td,
203                                     struct verify_header *hdr)
204 {
205         if (td->o.verify == VERIFY_PATTERN_NO_HDR)
206                 return 0;
207
208         return __hdr_size(hdr->verify_type);
209 }
210
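/*
 * Return the checksum-specific area that immediately follows the generic
 * verify_header, e.g. a struct vhdr_md5 when verify=md5.
 */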
211 static void *hdr_priv(struct verify_header *hdr)
212 {
213         void *priv = hdr;
214
215         return priv + sizeof(struct verify_header);
216 }
217
218 /*
219  * Verify container, pass info to verify handlers and allow them to
220  * pass info back in case of error
221  */
222 struct vcont {
223         /*
224          * Input
225          */
226         struct io_u *io_u;
227         unsigned int hdr_num;
228         struct thread_data *td;
229
230         /*
231          * Output, only valid in case of error
232          */
233         const char *name;
234         void *good_crc;
235         void *bad_crc;
236         unsigned int crc_len;
237 };
238
239 #define DUMP_BUF_SZ     255
240
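/*
 * Write 'len' bytes of 'buf' to a file named "<basename>.<offset>.<type>"
 * (prefixed with aux_path when set), e.g. "testfile.1048576.received".
 */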
241 static void dump_buf(char *buf, unsigned int len, unsigned long long offset,
242                      const char *type, struct fio_file *f)
243 {
244         char *ptr, *fname;
245         char sep[2] = { FIO_OS_PATH_SEPARATOR, 0 };
246         int ret, fd;
247
248         ptr = strdup(f->file_name);
249
250         if (asprintf(&fname, "%s%s%s.%llu.%s", aux_path ? : "",
251                      aux_path ? sep : "", basename(ptr), offset, type) < 0) {
252                 if (!fio_did_warn(FIO_WARN_VERIFY_BUF))
253                         log_err("fio: not enough memory for dump buffer filename\n");
254                 goto free_ptr;
255         }
256
257         fd = open(fname, O_CREAT | O_TRUNC | O_WRONLY, 0644);
258         if (fd < 0) {
259                 perror("open verify buf file");
260                 goto free_fname;
261         }
262
263         while (len) {
264                 ret = write(fd, buf, len);
265                 if (!ret)
266                         break;
267                 else if (ret < 0) {
268                         perror("write verify buf file");
269                         break;
270                 }
271                 len -= ret;
272                 buf += ret;
273         }
274
275         close(fd);
276         log_err("       %s data dumped as %s\n", type, fname);
277
278 free_fname:
279         free(fname);
280
281 free_ptr:
282         free(ptr);
283 }
284
285 /*
286  * Dump the contents of the read block and re-generate the correct data
287  * and dump that too.
288  */
289 static void __dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
290 {
291         struct thread_data *td = vc->td;
292         struct io_u *io_u = vc->io_u;
293         unsigned long hdr_offset;
294         struct io_u dummy;
295         void *buf;
296
297         if (!td->o.verify_dump)
298                 return;
299
300         /*
301          * Dump the contents we just read off disk
302          */
303         hdr_offset = vc->hdr_num * hdr->len;
304
305         dump_buf(io_u->buf + hdr_offset, hdr->len, io_u->verify_offset + hdr_offset,
306                         "received", vc->io_u->file);
307
308         /*
309          * Allocate a new buf and re-generate the original data
310          */
311         buf = malloc(io_u->buflen);
312         dummy = *io_u;
313         dummy.buf = buf;
314         dummy.rand_seed = hdr->rand_seed;
315         dummy.buf_filled_len = 0;
316         dummy.buflen = io_u->buflen;
317
318         fill_pattern_headers(td, &dummy, hdr->rand_seed, 1);
319
320         dump_buf(buf + hdr_offset, hdr->len, io_u->verify_offset + hdr_offset,
321                         "expected", vc->io_u->file);
322         free(buf);
323 }
324
325 static void dump_verify_buffers(struct verify_header *hdr, struct vcont *vc)
326 {
327         struct thread_data *td = vc->td;
328         struct verify_header shdr;
329
330         if (td->o.verify == VERIFY_PATTERN_NO_HDR) {
331                 __fill_hdr(td, vc->io_u, &shdr, 0, vc->io_u->buflen, 0);
332                 hdr = &shdr;
333         }
334
335         __dump_verify_buffers(hdr, vc);
336 }
337
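/*
 * Emit the common failure banner, for example (values illustrative):
 *
 *   md5: verify failed at file /dev/sdb offset 1052672, length 4096 (requested block: offset=1048576, length=8192, flags=1)
 *
 * followed by the expected/received checksum hexdumps and, when verify_dump
 * is set, dumps of the received and expected buffer contents.
 */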
338 static void log_verify_failure(struct verify_header *hdr, struct vcont *vc)
339 {
340         unsigned long long offset;
341         uint32_t len;
342         struct thread_data *td = vc->td;
343
344         offset = vc->io_u->verify_offset;
345         if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
346                 len = hdr->len;
347                 offset += (unsigned long long) vc->hdr_num * len;
348         } else {
349                 len = vc->io_u->buflen;
350         }
351
352         log_err("%.8s: verify failed at file %s offset %llu, length %u"
353                         " (requested block: offset=%llu, length=%llu, flags=%x)\n",
354                         vc->name, vc->io_u->file->file_name, offset, len,
355                         vc->io_u->verify_offset, vc->io_u->buflen, vc->io_u->flags);
356
357         if (vc->good_crc && vc->bad_crc) {
358                 log_err("       Expected CRC: ");
359                 hexdump(vc->good_crc, vc->crc_len);
360                 log_err("       Received CRC: ");
361                 hexdump(vc->bad_crc, vc->crc_len);
362         }
363
364         dump_verify_buffers(hdr, vc);
365 }
366
367 /*
368  * Return data area 'header_num'
369  */
370 static inline void *io_u_verify_off(struct verify_header *hdr, struct vcont *vc)
371 {
372         return vc->io_u->buf + vc->hdr_num * hdr->len + hdr_size(vc->td, hdr);
373 }
374
375 static int verify_io_u_pattern(struct verify_header *hdr, struct vcont *vc)
376 {
377         struct thread_data *td = vc->td;
378         struct io_u *io_u = vc->io_u;
379         char *buf, *pattern;
380         unsigned int header_size = __hdr_size(td->o.verify);
381         unsigned int len, mod, i, pattern_size;
382         int rc;
383
384         pattern = td->o.verify_pattern;
385         pattern_size = td->o.verify_pattern_bytes;
386         assert(pattern_size != 0);
387
388         (void)paste_format_inplace(pattern, pattern_size,
389                                    td->o.verify_fmt, td->o.verify_fmt_sz, io_u);
390
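        /*
         * The pattern was laid over the whole io_u buffer (headers included),
         * so the expected byte at data offset i of region hdr_num is
         * pattern[(hdr_inc * hdr_num + header_size + i) % pattern_size];
         * 'mod' is that index for i = 0.
         */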
391         buf = (char *) hdr + header_size;
392         len = get_hdr_inc(td, io_u) - header_size;
393         mod = (get_hdr_inc(td, io_u) * vc->hdr_num + header_size) % pattern_size;
394
395         rc = cmp_pattern(pattern, pattern_size, mod, buf, len);
396         if (!rc)
397                 return 0;
398
399         /* Slow path, compare each byte */
400         for (i = 0; i < len; i++) {
401                 if (buf[i] != pattern[mod]) {
402                         unsigned int bits;
403
404                         bits = hweight8(buf[i] ^ pattern[mod]);
405                         log_err("fio: got pattern '%02x', wanted '%02x'. Bad bits %d\n",
406                                 (unsigned char)buf[i],
407                                 (unsigned char)pattern[mod],
408                                 bits);
409                         log_err("fio: bad pattern block offset %u\n",
410                                 i + header_size);
411                         vc->name = "pattern";
412                         log_verify_failure(hdr, vc);
413                         return EILSEQ;
414                 }
415                 mod++;
416                 if (mod == td->o.verify_pattern_bytes)
417                         mod = 0;
418         }
419
420         /* Unreachable line */
421         assert(0);
422         return EILSEQ;
423 }
424
425 static int verify_io_u_xxhash(struct verify_header *hdr, struct vcont *vc)
426 {
427         void *p = io_u_verify_off(hdr, vc);
428         struct vhdr_xxhash *vh = hdr_priv(hdr);
429         uint32_t hash;
430         void *state;
431
432         dprint(FD_VERIFY, "xxhash verify io_u %p, len %u\n", vc->io_u, hdr->len);
433
434         state = XXH32_init(1);
435         XXH32_update(state, p, hdr->len - hdr_size(vc->td, hdr));
436         hash = XXH32_digest(state);
437
438         if (vh->hash == hash)
439                 return 0;
440
441         vc->name = "xxhash";
442         vc->good_crc = &vh->hash;
443         vc->bad_crc = &hash;
444         vc->crc_len = sizeof(hash);
445         log_verify_failure(hdr, vc);
446         return EILSEQ;
447 }
448
449 static int verify_io_u_sha3(struct verify_header *hdr, struct vcont *vc,
450                             struct fio_sha3_ctx *sha3_ctx, uint8_t *sha,
451                             unsigned int sha_size, const char *name)
452 {
453         void *p = io_u_verify_off(hdr, vc);
454
455         dprint(FD_VERIFY, "%s verify io_u %p, len %u\n", name, vc->io_u, hdr->len);
456
457         fio_sha3_update(sha3_ctx, p, hdr->len - hdr_size(vc->td, hdr));
458         fio_sha3_final(sha3_ctx);
459
460         if (!memcmp(sha, sha3_ctx->sha, sha_size))
461                 return 0;
462
463         vc->name = name;
464         vc->good_crc = sha;
465         vc->bad_crc = sha3_ctx->sha;
466         vc->crc_len = sha_size;
467         log_verify_failure(hdr, vc);
468         return EILSEQ;
469 }
470
471 static int verify_io_u_sha3_224(struct verify_header *hdr, struct vcont *vc)
472 {
473         struct vhdr_sha3_224 *vh = hdr_priv(hdr);
474         uint8_t sha[SHA3_224_DIGEST_SIZE];
475         struct fio_sha3_ctx sha3_ctx = {
476                 .sha = sha,
477         };
478
479         fio_sha3_224_init(&sha3_ctx);
480
481         return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
482                                 SHA3_224_DIGEST_SIZE, "sha3-224");
483 }
484
485 static int verify_io_u_sha3_256(struct verify_header *hdr, struct vcont *vc)
486 {
487         struct vhdr_sha3_256 *vh = hdr_priv(hdr);
488         uint8_t sha[SHA3_256_DIGEST_SIZE];
489         struct fio_sha3_ctx sha3_ctx = {
490                 .sha = sha,
491         };
492
493         fio_sha3_256_init(&sha3_ctx);
494
495         return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
496                                 SHA3_256_DIGEST_SIZE, "sha3-256");
497 }
498
499 static int verify_io_u_sha3_384(struct verify_header *hdr, struct vcont *vc)
500 {
501         struct vhdr_sha3_384 *vh = hdr_priv(hdr);
502         uint8_t sha[SHA3_384_DIGEST_SIZE];
503         struct fio_sha3_ctx sha3_ctx = {
504                 .sha = sha,
505         };
506
507         fio_sha3_384_init(&sha3_ctx);
508
509         return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
510                                 SHA3_384_DIGEST_SIZE, "sha3-384");
511 }
512
513 static int verify_io_u_sha3_512(struct verify_header *hdr, struct vcont *vc)
514 {
515         struct vhdr_sha3_512 *vh = hdr_priv(hdr);
516         uint8_t sha[SHA3_512_DIGEST_SIZE];
517         struct fio_sha3_ctx sha3_ctx = {
518                 .sha = sha,
519         };
520
521         fio_sha3_512_init(&sha3_ctx);
522
523         return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
524                                 SHA3_512_DIGEST_SIZE, "sha3-512");
525 }
526
527 static int verify_io_u_sha512(struct verify_header *hdr, struct vcont *vc)
528 {
529         void *p = io_u_verify_off(hdr, vc);
530         struct vhdr_sha512 *vh = hdr_priv(hdr);
531         uint8_t sha512[128];
532         struct fio_sha512_ctx sha512_ctx = {
533                 .buf = sha512,
534         };
535
536         dprint(FD_VERIFY, "sha512 verify io_u %p, len %u\n", vc->io_u, hdr->len);
537
538         fio_sha512_init(&sha512_ctx);
539         fio_sha512_update(&sha512_ctx, p, hdr->len - hdr_size(vc->td, hdr));
540
541         if (!memcmp(vh->sha512, sha512_ctx.buf, sizeof(sha512)))
542                 return 0;
543
544         vc->name = "sha512";
545         vc->good_crc = vh->sha512;
546         vc->bad_crc = sha512_ctx.buf;
547         vc->crc_len = sizeof(vh->sha512);
548         log_verify_failure(hdr, vc);
549         return EILSEQ;
550 }
551
552 static int verify_io_u_sha256(struct verify_header *hdr, struct vcont *vc)
553 {
554         void *p = io_u_verify_off(hdr, vc);
555         struct vhdr_sha256 *vh = hdr_priv(hdr);
556         uint8_t sha256[64];
557         struct fio_sha256_ctx sha256_ctx = {
558                 .buf = sha256,
559         };
560
561         dprint(FD_VERIFY, "sha256 verify io_u %p, len %u\n", vc->io_u, hdr->len);
562
563         fio_sha256_init(&sha256_ctx);
564         fio_sha256_update(&sha256_ctx, p, hdr->len - hdr_size(vc->td, hdr));
565         fio_sha256_final(&sha256_ctx);
566
567         if (!memcmp(vh->sha256, sha256_ctx.buf, sizeof(sha256)))
568                 return 0;
569
570         vc->name = "sha256";
571         vc->good_crc = vh->sha256;
572         vc->bad_crc = sha256_ctx.buf;
573         vc->crc_len = sizeof(vh->sha256);
574         log_verify_failure(hdr, vc);
575         return EILSEQ;
576 }
577
578 static int verify_io_u_sha1(struct verify_header *hdr, struct vcont *vc)
579 {
580         void *p = io_u_verify_off(hdr, vc);
581         struct vhdr_sha1 *vh = hdr_priv(hdr);
582         uint32_t sha1[5];
583         struct fio_sha1_ctx sha1_ctx = {
584                 .H = sha1,
585         };
586
587         dprint(FD_VERIFY, "sha1 verify io_u %p, len %u\n", vc->io_u, hdr->len);
588
589         fio_sha1_init(&sha1_ctx);
590         fio_sha1_update(&sha1_ctx, p, hdr->len - hdr_size(vc->td, hdr));
591         fio_sha1_final(&sha1_ctx);
592
593         if (!memcmp(vh->sha1, sha1_ctx.H, sizeof(sha1)))
594                 return 0;
595
596         vc->name = "sha1";
597         vc->good_crc = vh->sha1;
598         vc->bad_crc = sha1_ctx.H;
599         vc->crc_len = sizeof(vh->sha1);
600         log_verify_failure(hdr, vc);
601         return EILSEQ;
602 }
603
604 static int verify_io_u_crc7(struct verify_header *hdr, struct vcont *vc)
605 {
606         void *p = io_u_verify_off(hdr, vc);
607         struct vhdr_crc7 *vh = hdr_priv(hdr);
608         unsigned char c;
609
610         dprint(FD_VERIFY, "crc7 verify io_u %p, len %u\n", vc->io_u, hdr->len);
611
612         c = fio_crc7(p, hdr->len - hdr_size(vc->td, hdr));
613
614         if (c == vh->crc7)
615                 return 0;
616
617         vc->name = "crc7";
618         vc->good_crc = &vh->crc7;
619         vc->bad_crc = &c;
620         vc->crc_len = 1;
621         log_verify_failure(hdr, vc);
622         return EILSEQ;
623 }
624
625 static int verify_io_u_crc16(struct verify_header *hdr, struct vcont *vc)
626 {
627         void *p = io_u_verify_off(hdr, vc);
628         struct vhdr_crc16 *vh = hdr_priv(hdr);
629         unsigned short c;
630
631         dprint(FD_VERIFY, "crc16 verify io_u %p, len %u\n", vc->io_u, hdr->len);
632
633         c = fio_crc16(p, hdr->len - hdr_size(vc->td, hdr));
634
635         if (c == vh->crc16)
636                 return 0;
637
638         vc->name = "crc16";
639         vc->good_crc = &vh->crc16;
640         vc->bad_crc = &c;
641         vc->crc_len = 2;
642         log_verify_failure(hdr, vc);
643         return EILSEQ;
644 }
645
646 static int verify_io_u_crc64(struct verify_header *hdr, struct vcont *vc)
647 {
648         void *p = io_u_verify_off(hdr, vc);
649         struct vhdr_crc64 *vh = hdr_priv(hdr);
650         unsigned long long c;
651
652         dprint(FD_VERIFY, "crc64 verify io_u %p, len %u\n", vc->io_u, hdr->len);
653
654         c = fio_crc64(p, hdr->len - hdr_size(vc->td, hdr));
655
656         if (c == vh->crc64)
657                 return 0;
658
659         vc->name = "crc64";
660         vc->good_crc = &vh->crc64;
661         vc->bad_crc = &c;
662         vc->crc_len = 8;
663         log_verify_failure(hdr, vc);
664         return EILSEQ;
665 }
666
667 static int verify_io_u_crc32(struct verify_header *hdr, struct vcont *vc)
668 {
669         void *p = io_u_verify_off(hdr, vc);
670         struct vhdr_crc32 *vh = hdr_priv(hdr);
671         uint32_t c;
672
673         dprint(FD_VERIFY, "crc32 verify io_u %p, len %u\n", vc->io_u, hdr->len);
674
675         c = fio_crc32(p, hdr->len - hdr_size(vc->td, hdr));
676
677         if (c == vh->crc32)
678                 return 0;
679
680         vc->name = "crc32";
681         vc->good_crc = &vh->crc32;
682         vc->bad_crc = &c;
683         vc->crc_len = 4;
684         log_verify_failure(hdr, vc);
685         return EILSEQ;
686 }
687
688 static int verify_io_u_crc32c(struct verify_header *hdr, struct vcont *vc)
689 {
690         void *p = io_u_verify_off(hdr, vc);
691         struct vhdr_crc32 *vh = hdr_priv(hdr);
692         uint32_t c;
693
694         dprint(FD_VERIFY, "crc32c verify io_u %p, len %u\n", vc->io_u, hdr->len);
695
696         c = fio_crc32c(p, hdr->len - hdr_size(vc->td, hdr));
697
698         if (c == vh->crc32)
699                 return 0;
700
701         vc->name = "crc32c";
702         vc->good_crc = &vh->crc32;
703         vc->bad_crc = &c;
704         vc->crc_len = 4;
705         log_verify_failure(hdr, vc);
706         return EILSEQ;
707 }
708
709 static int verify_io_u_md5(struct verify_header *hdr, struct vcont *vc)
710 {
711         void *p = io_u_verify_off(hdr, vc);
712         struct vhdr_md5 *vh = hdr_priv(hdr);
713         uint32_t hash[MD5_HASH_WORDS];
714         struct fio_md5_ctx md5_ctx = {
715                 .hash = hash,
716         };
717
718         dprint(FD_VERIFY, "md5 verify io_u %p, len %u\n", vc->io_u, hdr->len);
719
720         fio_md5_init(&md5_ctx);
721         fio_md5_update(&md5_ctx, p, hdr->len - hdr_size(vc->td, hdr));
722         fio_md5_final(&md5_ctx);
723
724         if (!memcmp(vh->md5_digest, md5_ctx.hash, sizeof(hash)))
725                 return 0;
726
727         vc->name = "md5";
728         vc->good_crc = vh->md5_digest;
729         vc->bad_crc = md5_ctx.hash;
730         vc->crc_len = sizeof(hash);
731         log_verify_failure(hdr, vc);
732         return EILSEQ;
733 }
734
735 /*
736  * Push IO verification to a separate thread
737  */
738 int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
739 {
740         struct io_u *io_u = *io_u_ptr;
741
742         pthread_mutex_lock(&td->io_u_lock);
743
744         if (io_u->file)
745                 put_file_log(td, io_u->file);
746
747         if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
748                 td->cur_depth--;
749                 io_u_clear(td, io_u, IO_U_F_IN_CUR_DEPTH);
750         }
751         flist_add_tail(&io_u->verify_list, &td->verify_list);
752         *io_u_ptr = NULL;
753
754         pthread_cond_signal(&td->verify_cond);
755         pthread_mutex_unlock(&td->io_u_lock);
756         return 0;
757 }
758
759 /*
760  * Thanks Rusty, for spending the time so I don't have to.
761  *
762  * http://rusty.ozlabs.org/?p=560
763  */
764 static int mem_is_zero(const void *data, size_t length)
765 {
766         const unsigned char *p = data;
767         size_t len;
768
769         /* Check first 16 bytes manually */
770         for (len = 0; len < 16; len++) {
771                 if (!length)
772                         return 1;
773                 if (*p)
774                         return 0;
775                 p++;
776                 length--;
777         }
778
779         /* The first 16 bytes are zero; compare the buffer with an overlapping copy of itself (offset by 16), which can only match if every byte is zero. */
780         return memcmp(data, p, length) == 0;
781 }
782
783 static int mem_is_zero_slow(const void *data, size_t length, size_t *offset)
784 {
785         const unsigned char *p = data;
786
787         *offset = 0;
788         while (length) {
789                 if (*p)
790                         break;
791                 (*offset)++;
792                 length--;
793                 p++;
794         }
795
796         return !length;
797 }
798
799 static int verify_trimmed_io_u(struct thread_data *td, struct io_u *io_u)
800 {
801         size_t offset;
802
803         if (!td->o.trim_zero)
804                 return 0;
805
806         if (mem_is_zero(io_u->buf, io_u->buflen))
807                 return 0;
808
809         mem_is_zero_slow(io_u->buf, io_u->buflen, &offset);
810
811         log_err("trim: verify failed at file %s offset %llu, length %llu"
812                 ", block offset %lu\n",
813                         io_u->file->file_name, io_u->verify_offset, io_u->buflen,
814                         (unsigned long) offset);
815         return EILSEQ;
816 }
817
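/*
 * Sanity check the generic header: magic, length, seed, offset, numberio
 * (skipped for read-only, time_based, verify_only and variable block size
 * jobs) and finally the header CRC, which is computed over the bytes
 * preceding the crc32 member.
 */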
818 static int verify_header(struct io_u *io_u, struct thread_data *td,
819                          struct verify_header *hdr, unsigned int hdr_num,
820                          unsigned int hdr_len)
821 {
822         void *p = hdr;
823         uint32_t crc;
824
825         if (hdr->magic != FIO_HDR_MAGIC) {
826                 log_err("verify: bad magic header %x, wanted %x",
827                         hdr->magic, FIO_HDR_MAGIC);
828                 goto err;
829         }
830         if (hdr->len != hdr_len) {
831                 log_err("verify: bad header length %u, wanted %u",
832                         hdr->len, hdr_len);
833                 goto err;
834         }
835         if (hdr->rand_seed != io_u->rand_seed) {
836                 log_err("verify: bad header rand_seed %"PRIu64
837                         ", wanted %"PRIu64,
838                         hdr->rand_seed, io_u->rand_seed);
839                 goto err;
840         }
841         if (hdr->offset != io_u->verify_offset + hdr_num * td->o.verify_interval) {
842                 log_err("verify: bad header offset %"PRIu64
843                         ", wanted %llu",
844                         hdr->offset, io_u->verify_offset);
845                 goto err;
846         }
847
848         /*
849          * For read-only workloads, the program cannot be certain of the
850          * last numberio written to a block, so the numberio check is done
851          * only for workloads that write data. For verify_only, the
852          * numberio check is skipped.
853          */
854         if (td_write(td) && (td_min_bs(td) == td_max_bs(td)) &&
855             !td->o.time_based)
856                 if (!td->o.verify_only)
857                         if (hdr->numberio != io_u->numberio) {
858                                 log_err("verify: bad header numberio %"PRIu16
859                                         ", wanted %"PRIu16,
860                                         hdr->numberio, io_u->numberio);
861                                 goto err;
862                         }
863
864         crc = fio_crc32c(p, offsetof(struct verify_header, crc32));
865         if (crc != hdr->crc32) {
866                 log_err("verify: bad header crc %x, calculated %x",
867                         hdr->crc32, crc);
868                 goto err;
869         }
870         return 0;
871
872 err:
873         log_err(" at file %s offset %llu, length %u"
874                 " (requested block: offset=%llu, length=%llu)\n",
875                 io_u->file->file_name,
876                 io_u->verify_offset + hdr_num * hdr_len, hdr_len,
877                 io_u->verify_offset, io_u->buflen);
878
879         if (td->o.verify_dump)
880                 dump_buf(p, hdr_len, io_u->verify_offset + hdr_num * hdr_len,
881                                 "hdr_fail", io_u->file);
882
883         return EILSEQ;
884 }
885
886 int verify_io_u(struct thread_data *td, struct io_u **io_u_ptr)
887 {
888         struct verify_header *hdr;
889         struct io_u *io_u = *io_u_ptr;
890         unsigned int header_size, hdr_inc, hdr_num = 0;
891         void *p;
892         int ret;
893
894         if (td->o.verify == VERIFY_NULL || io_u->ddir != DDIR_READ)
895                 return 0;
896         /*
897          * If the IO engine is faking IO (like null), then just pretend
898          * we verified everything.
899          */
900         if (td_ioengine_flagged(td, FIO_FAKEIO))
901                 return 0;
902
903         if (io_u->flags & IO_U_F_TRIMMED) {
904                 ret = verify_trimmed_io_u(td, io_u);
905                 goto done;
906         }
907
908         hdr_inc = get_hdr_inc(td, io_u);
909
910         ret = 0;
911         for (p = io_u->buf; p < io_u->buf + io_u->buflen;
912              p += hdr_inc, hdr_num++) {
913                 struct vcont vc = {
914                         .io_u           = io_u,
915                         .hdr_num        = hdr_num,
916                         .td             = td,
917                 };
918                 unsigned int verify_type;
919
920                 if (ret && td->o.verify_fatal)
921                         break;
922
923                 header_size = __hdr_size(td->o.verify);
924                 if (td->o.verify_offset)
925                         memswp(p, p + td->o.verify_offset, header_size);
926                 hdr = p;
927
928                 /*
929                  * Make the rand_seed check pass when we have verify_backlog or
930                  * a zone reset frequency set for zonemode=zbd.
931                  */
932                 if (!td_rw(td) || (td->flags & TD_F_VER_BACKLOG) ||
933                     td->o.zrf.u.f)
934                         io_u->rand_seed = hdr->rand_seed;
935
936                 if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
937                         ret = verify_header(io_u, td, hdr, hdr_num, hdr_inc);
938                         if (ret)
939                                 return ret;
940                 }
941
942                 if (td->o.verify != VERIFY_NONE)
943                         verify_type = td->o.verify;
944                 else
945                         verify_type = hdr->verify_type;
946
947                 switch (verify_type) {
948                 case VERIFY_HDR_ONLY:
949                         /* The header is always verified; check whether a
950                          * pattern also needs verifying. */
951                         if (td->o.verify_pattern_bytes)
952                                 ret = verify_io_u_pattern(hdr, &vc);
953                         break;
954                 case VERIFY_MD5:
955                         ret = verify_io_u_md5(hdr, &vc);
956                         break;
957                 case VERIFY_CRC64:
958                         ret = verify_io_u_crc64(hdr, &vc);
959                         break;
960                 case VERIFY_CRC32C:
961                 case VERIFY_CRC32C_INTEL:
962                         ret = verify_io_u_crc32c(hdr, &vc);
963                         break;
964                 case VERIFY_CRC32:
965                         ret = verify_io_u_crc32(hdr, &vc);
966                         break;
967                 case VERIFY_CRC16:
968                         ret = verify_io_u_crc16(hdr, &vc);
969                         break;
970                 case VERIFY_CRC7:
971                         ret = verify_io_u_crc7(hdr, &vc);
972                         break;
973                 case VERIFY_SHA256:
974                         ret = verify_io_u_sha256(hdr, &vc);
975                         break;
976                 case VERIFY_SHA512:
977                         ret = verify_io_u_sha512(hdr, &vc);
978                         break;
979                 case VERIFY_SHA3_224:
980                         ret = verify_io_u_sha3_224(hdr, &vc);
981                         break;
982                 case VERIFY_SHA3_256:
983                         ret = verify_io_u_sha3_256(hdr, &vc);
984                         break;
985                 case VERIFY_SHA3_384:
986                         ret = verify_io_u_sha3_384(hdr, &vc);
987                         break;
988                 case VERIFY_SHA3_512:
989                         ret = verify_io_u_sha3_512(hdr, &vc);
990                         break;
991                 case VERIFY_XXHASH:
992                         ret = verify_io_u_xxhash(hdr, &vc);
993                         break;
994                 case VERIFY_SHA1:
995                         ret = verify_io_u_sha1(hdr, &vc);
996                         break;
997                 case VERIFY_PATTERN:
998                 case VERIFY_PATTERN_NO_HDR:
999                         ret = verify_io_u_pattern(hdr, &vc);
1000                         break;
1001                 default:
1002                         log_err("Bad verify type %u\n", hdr->verify_type);
1003                         ret = EINVAL;
1004                 }
1005
1006                 if (ret && verify_type != hdr->verify_type)
1007                         log_err("fio: verify type mismatch (%u media, %u given)\n",
1008                                         hdr->verify_type, verify_type);
1009         }
1010
1011 done:
1012         if (ret && td->o.verify_fatal)
1013                 fio_mark_td_terminate(td);
1014
1015         return ret;
1016 }
1017
1018 static void fill_xxhash(struct verify_header *hdr, void *p, unsigned int len)
1019 {
1020         struct vhdr_xxhash *vh = hdr_priv(hdr);
1021         void *state;
1022
1023         state = XXH32_init(1);
1024         XXH32_update(state, p, len);
1025         vh->hash = XXH32_digest(state);
1026 }
1027
1028 static void fill_sha3(struct fio_sha3_ctx *sha3_ctx, void *p, unsigned int len)
1029 {
1030         fio_sha3_update(sha3_ctx, p, len);
1031         fio_sha3_final(sha3_ctx);
1032 }
1033
1034 static void fill_sha3_224(struct verify_header *hdr, void *p, unsigned int len)
1035 {
1036         struct vhdr_sha3_224 *vh = hdr_priv(hdr);
1037         struct fio_sha3_ctx sha3_ctx = {
1038                 .sha = vh->sha,
1039         };
1040
1041         fio_sha3_224_init(&sha3_ctx);
1042         fill_sha3(&sha3_ctx, p, len);
1043 }
1044
1045 static void fill_sha3_256(struct verify_header *hdr, void *p, unsigned int len)
1046 {
1047         struct vhdr_sha3_256 *vh = hdr_priv(hdr);
1048         struct fio_sha3_ctx sha3_ctx = {
1049                 .sha = vh->sha,
1050         };
1051
1052         fio_sha3_256_init(&sha3_ctx);
1053         fill_sha3(&sha3_ctx, p, len);
1054 }
1055
1056 static void fill_sha3_384(struct verify_header *hdr, void *p, unsigned int len)
1057 {
1058         struct vhdr_sha3_384 *vh = hdr_priv(hdr);
1059         struct fio_sha3_ctx sha3_ctx = {
1060                 .sha = vh->sha,
1061         };
1062
1063         fio_sha3_384_init(&sha3_ctx);
1064         fill_sha3(&sha3_ctx, p, len);
1065 }
1066
1067 static void fill_sha3_512(struct verify_header *hdr, void *p, unsigned int len)
1068 {
1069         struct vhdr_sha3_512 *vh = hdr_priv(hdr);
1070         struct fio_sha3_ctx sha3_ctx = {
1071                 .sha = vh->sha,
1072         };
1073
1074         fio_sha3_512_init(&sha3_ctx);
1075         fill_sha3(&sha3_ctx, p, len);
1076 }
1077
1078 static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
1079 {
1080         struct vhdr_sha512 *vh = hdr_priv(hdr);
1081         struct fio_sha512_ctx sha512_ctx = {
1082                 .buf = vh->sha512,
1083         };
1084
1085         fio_sha512_init(&sha512_ctx);
1086         fio_sha512_update(&sha512_ctx, p, len);
1087 }
1088
1089 static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
1090 {
1091         struct vhdr_sha256 *vh = hdr_priv(hdr);
1092         struct fio_sha256_ctx sha256_ctx = {
1093                 .buf = vh->sha256,
1094         };
1095
1096         fio_sha256_init(&sha256_ctx);
1097         fio_sha256_update(&sha256_ctx, p, len);
1098         fio_sha256_final(&sha256_ctx);
1099 }
1100
1101 static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
1102 {
1103         struct vhdr_sha1 *vh = hdr_priv(hdr);
1104         struct fio_sha1_ctx sha1_ctx = {
1105                 .H = vh->sha1,
1106         };
1107
1108         fio_sha1_init(&sha1_ctx);
1109         fio_sha1_update(&sha1_ctx, p, len);
1110         fio_sha1_final(&sha1_ctx);
1111 }
1112
1113 static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
1114 {
1115         struct vhdr_crc7 *vh = hdr_priv(hdr);
1116
1117         vh->crc7 = fio_crc7(p, len);
1118 }
1119
1120 static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
1121 {
1122         struct vhdr_crc16 *vh = hdr_priv(hdr);
1123
1124         vh->crc16 = fio_crc16(p, len);
1125 }
1126
1127 static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
1128 {
1129         struct vhdr_crc32 *vh = hdr_priv(hdr);
1130
1131         vh->crc32 = fio_crc32(p, len);
1132 }
1133
1134 static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
1135 {
1136         struct vhdr_crc32 *vh = hdr_priv(hdr);
1137
1138         vh->crc32 = fio_crc32c(p, len);
1139 }
1140
1141 static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
1142 {
1143         struct vhdr_crc64 *vh = hdr_priv(hdr);
1144
1145         vh->crc64 = fio_crc64(p, len);
1146 }
1147
1148 static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
1149 {
1150         struct vhdr_md5 *vh = hdr_priv(hdr);
1151         struct fio_md5_ctx md5_ctx = {
1152                 .hash = (uint32_t *) vh->md5_digest,
1153         };
1154
1155         fio_md5_init(&md5_ctx);
1156         fio_md5_update(&md5_ctx, p, len);
1157         fio_md5_final(&md5_ctx);
1158 }
1159
1160 static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
1161                        struct verify_header *hdr, unsigned int header_num,
1162                        unsigned int header_len, uint64_t rand_seed)
1163 {
1164         void *p = hdr;
1165
1166         hdr->magic = FIO_HDR_MAGIC;
1167         hdr->verify_type = td->o.verify;
1168         hdr->len = header_len;
1169         hdr->rand_seed = rand_seed;
1170         hdr->offset = io_u->verify_offset + header_num * td->o.verify_interval;
1171         hdr->time_sec = io_u->start_time.tv_sec;
1172         hdr->time_nsec = io_u->start_time.tv_nsec;
1173         hdr->thread = td->thread_number;
1174         hdr->numberio = io_u->numberio;
1175         hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
1176 }
1177
1178
1179 static void fill_hdr(struct thread_data *td, struct io_u *io_u,
1180                      struct verify_header *hdr, unsigned int header_num,
1181                      unsigned int header_len, uint64_t rand_seed)
1182 {
1183         if (td->o.verify != VERIFY_PATTERN_NO_HDR)
1184                 __fill_hdr(td, io_u, hdr, header_num, header_len, rand_seed);
1185 }
1186
1187 static void populate_hdr(struct thread_data *td, struct io_u *io_u,
1188                          struct verify_header *hdr, unsigned int header_num,
1189                          unsigned int header_len)
1190 {
1191         unsigned int data_len;
1192         void *data;
1193         char *p;
1194
1195         p = (char *) hdr;
1196
1197         fill_hdr(td, io_u, hdr, header_num, header_len, io_u->rand_seed);
1198
1199         if (header_len <= hdr_size(td, hdr)) {
1200                 td_verror(td, EINVAL, "Blocksize too small");
1201                 return;
1202         }
1203         data_len = header_len - hdr_size(td, hdr);
1204
1205         data = p + hdr_size(td, hdr);
1206         switch (td->o.verify) {
1207         case VERIFY_MD5:
1208                 dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
1209                                                 io_u, hdr->len);
1210                 fill_md5(hdr, data, data_len);
1211                 break;
1212         case VERIFY_CRC64:
1213                 dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
1214                                                 io_u, hdr->len);
1215                 fill_crc64(hdr, data, data_len);
1216                 break;
1217         case VERIFY_CRC32C:
1218         case VERIFY_CRC32C_INTEL:
1219                 dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
1220                                                 io_u, hdr->len);
1221                 fill_crc32c(hdr, data, data_len);
1222                 break;
1223         case VERIFY_CRC32:
1224                 dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
1225                                                 io_u, hdr->len);
1226                 fill_crc32(hdr, data, data_len);
1227                 break;
1228         case VERIFY_CRC16:
1229                 dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
1230                                                 io_u, hdr->len);
1231                 fill_crc16(hdr, data, data_len);
1232                 break;
1233         case VERIFY_CRC7:
1234                 dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
1235                                                 io_u, hdr->len);
1236                 fill_crc7(hdr, data, data_len);
1237                 break;
1238         case VERIFY_SHA256:
1239                 dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
1240                                                 io_u, hdr->len);
1241                 fill_sha256(hdr, data, data_len);
1242                 break;
1243         case VERIFY_SHA512:
1244                 dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
1245                                                 io_u, hdr->len);
1246                 fill_sha512(hdr, data, data_len);
1247                 break;
1248         case VERIFY_SHA3_224:
1249                 dprint(FD_VERIFY, "fill sha3-224 io_u %p, len %u\n",
1250                                                 io_u, hdr->len);
1251                 fill_sha3_224(hdr, data, data_len);
1252                 break;
1253         case VERIFY_SHA3_256:
1254                 dprint(FD_VERIFY, "fill sha3-256 io_u %p, len %u\n",
1255                                                 io_u, hdr->len);
1256                 fill_sha3_256(hdr, data, data_len);
1257                 break;
1258         case VERIFY_SHA3_384:
1259                 dprint(FD_VERIFY, "fill sha3-384 io_u %p, len %u\n",
1260                                                 io_u, hdr->len);
1261                 fill_sha3_384(hdr, data, data_len);
1262                 break;
1263         case VERIFY_SHA3_512:
1264                 dprint(FD_VERIFY, "fill sha3-512 io_u %p, len %u\n",
1265                                                 io_u, hdr->len);
1266                 fill_sha3_512(hdr, data, data_len);
1267                 break;
1268         case VERIFY_XXHASH:
1269                 dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
1270                                                 io_u, hdr->len);
1271                 fill_xxhash(hdr, data, data_len);
1272                 break;
1273         case VERIFY_SHA1:
1274                 dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
1275                                                 io_u, hdr->len);
1276                 fill_sha1(hdr, data, data_len);
1277                 break;
1278         case VERIFY_HDR_ONLY:
1279         case VERIFY_PATTERN:
1280         case VERIFY_PATTERN_NO_HDR:
1281                 /* nothing to do here */
1282                 break;
1283         default:
1284                 log_err("fio: bad verify type: %d\n", td->o.verify);
1285                 assert(0);
1286         }
1287
1288         if (td->o.verify_offset && hdr_size(td, hdr))
1289                 memswp(p, p + td->o.verify_offset, hdr_size(td, hdr));
1290 }
1291
1292 /*
1293  * fill body of io_u->buf with random data and add a header with the
1294  * checksum of choice
1295  */
1296 void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
1297 {
1298         if (td->o.verify == VERIFY_NULL)
1299                 return;
1300
1301         fill_pattern_headers(td, io_u, 0, 0);
1302 }
1303
1304 int get_next_verify(struct thread_data *td, struct io_u *io_u)
1305 {
1306         struct io_piece *ipo = NULL;
1307
1308         /*
1309          * this io_u is from a requeue, we already filled the offsets
1310          */
1311         if (io_u->file)
1312                 return 0;
1313
1314         if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
1315                 struct fio_rb_node *n = rb_first(&td->io_hist_tree);
1316
1317                 ipo = rb_entry(n, struct io_piece, rb_node);
1318
1319                 /*
1320                  * Ensure that the associated IO has completed
1321                  */
1322                 if (atomic_load_acquire(&ipo->flags) & IP_F_IN_FLIGHT)
1323                         goto nothing;
1324
1325                 rb_erase(n, &td->io_hist_tree);
1326                 assert(ipo->flags & IP_F_ONRB);
1327                 ipo->flags &= ~IP_F_ONRB;
1328         } else if (!flist_empty(&td->io_hist_list)) {
1329                 ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);
1330
1331                 /*
1332                  * Ensure that the associated IO has completed
1333                  */
1334                 if (atomic_load_acquire(&ipo->flags) & IP_F_IN_FLIGHT)
1335                         goto nothing;
1336
1337                 flist_del(&ipo->list);
1338                 assert(ipo->flags & IP_F_ONLIST);
1339                 ipo->flags &= ~IP_F_ONLIST;
1340         }
1341
1342         if (ipo) {
1343                 td->io_hist_len--;
1344
1345                 io_u->offset = ipo->offset;
1346                 io_u->verify_offset = ipo->offset;
1347                 io_u->buflen = ipo->len;
1348                 io_u->numberio = ipo->numberio;
1349                 io_u->file = ipo->file;
1350                 io_u_set(td, io_u, IO_U_F_VER_LIST);
1351
1352                 if (ipo->flags & IP_F_TRIMMED)
1353                         io_u_set(td, io_u, IO_U_F_TRIMMED);
1354
1355                 if (!fio_file_open(io_u->file)) {
1356                         int r = td_io_open_file(td, io_u->file);
1357
1358                         if (r) {
1359                                 dprint(FD_VERIFY, "failed file %s open\n",
1360                                                 io_u->file->file_name);
1361                                 return 1;
1362                         }
1363                 }
1364
1365                 get_file(ipo->file);
1366                 assert(fio_file_open(io_u->file));
1367                 io_u->ddir = DDIR_READ;
1368                 io_u->xfer_buf = io_u->buf;
1369                 io_u->xfer_buflen = io_u->buflen;
1370
1371                 remove_trim_entry(td, ipo);
1372                 free(ipo);
1373                 dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);
1374
1375                 if (!td->o.verify_pattern_bytes) {
1376                         io_u->rand_seed = __rand(&td->verify_state);
1377                         if (sizeof(int) != sizeof(long *))
1378                                 io_u->rand_seed *= __rand(&td->verify_state);
1379                 }
1380                 return 0;
1381         }
1382
1383 nothing:
1384         dprint(FD_VERIFY, "get_next_verify: empty\n");
1385         return 1;
1386 }
1387
1388 void fio_verify_init(struct thread_data *td)
1389 {
1390         if (td->o.verify == VERIFY_CRC32C_INTEL ||
1391             td->o.verify == VERIFY_CRC32C) {
1392                 crc32c_arm64_probe();
1393                 crc32c_intel_probe();
1394         }
1395 }
1396
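/*
 * Worker for verify_async=N: sleeps on td->verify_cond, splices completed
 * io_us off td->verify_list and runs verify_io_u() on them, keeping the
 * checksum work off the submission path.
 */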
1397 static void *verify_async_thread(void *data)
1398 {
1399         struct thread_data *td = data;
1400         struct io_u *io_u;
1401         int ret = 0;
1402
1403         if (fio_option_is_set(&td->o, verify_cpumask) &&
1404             fio_setaffinity(td->pid, td->o.verify_cpumask)) {
1405                 log_err("fio: failed setting verify thread affinity\n");
1406                 goto done;
1407         }
1408
1409         do {
1410                 FLIST_HEAD(list);
1411
1412                 read_barrier();
1413                 if (td->verify_thread_exit)
1414                         break;
1415
1416                 pthread_mutex_lock(&td->io_u_lock);
1417
1418                 while (flist_empty(&td->verify_list) &&
1419                        !td->verify_thread_exit) {
1420                         ret = pthread_cond_wait(&td->verify_cond,
1421                                                         &td->io_u_lock);
1422                         if (ret) {
1423                                 break;
1424                         }
1425                 }
1426
1427                 flist_splice_init(&td->verify_list, &list);
1428                 pthread_mutex_unlock(&td->io_u_lock);
1429
1430                 if (flist_empty(&list))
1431                         continue;
1432
1433                 while (!flist_empty(&list)) {
1434                         io_u = flist_first_entry(&list, struct io_u, verify_list);
1435                         flist_del_init(&io_u->verify_list);
1436
1437                         io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
1438                         ret = verify_io_u(td, &io_u);
1439
1440                         put_io_u(td, io_u);
1441                         if (!ret)
1442                                 continue;
1443                         if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
1444                                 update_error_count(td, ret);
1445                                 td_clear_error(td);
1446                                 ret = 0;
1447                         }
1448                 }
1449         } while (!ret);
1450
1451         if (ret) {
1452                 td_verror(td, ret, "async_verify");
1453                 if (td->o.verify_fatal)
1454                         fio_mark_td_terminate(td);
1455         }
1456
1457 done:
1458         pthread_mutex_lock(&td->io_u_lock);
1459         td->nr_verify_threads--;
1460         pthread_cond_signal(&td->free_cond);
1461         pthread_mutex_unlock(&td->io_u_lock);
1462
1463         return NULL;
1464 }
1465
1466 int verify_async_init(struct thread_data *td)
1467 {
1468         int i, ret;
1469         pthread_attr_t attr;
1470
1471         pthread_attr_init(&attr);
1472         pthread_attr_setstacksize(&attr, 2 * PTHREAD_STACK_MIN);
1473
1474         td->verify_thread_exit = 0;
1475
1476         td->verify_threads = malloc(sizeof(pthread_t) * td->o.verify_async);
1477         for (i = 0; i < td->o.verify_async; i++) {
1478                 ret = pthread_create(&td->verify_threads[i], &attr,
1479                                         verify_async_thread, td);
1480                 if (ret) {
1481                         log_err("fio: async verify creation failed: %s\n",
1482                                         strerror(ret));
1483                         break;
1484                 }
1485                 ret = pthread_detach(td->verify_threads[i]);
1486                 if (ret) {
1487                         log_err("fio: async verify thread detach failed: %s\n",
1488                                         strerror(ret));
1489                         break;
1490                 }
1491                 td->nr_verify_threads++;
1492         }
1493
1494         pthread_attr_destroy(&attr);
1495
1496         if (i != td->o.verify_async) {
1497                 log_err("fio: only %d verify threads started, exiting\n", i);
1498
1499                 pthread_mutex_lock(&td->io_u_lock);
1500                 td->verify_thread_exit = 1;
1501                 pthread_cond_broadcast(&td->verify_cond);
1502                 pthread_mutex_unlock(&td->io_u_lock);
1503
1504                 return 1;
1505         }
1506
1507         return 0;
1508 }
1509
1510 void verify_async_exit(struct thread_data *td)
1511 {
1512         pthread_mutex_lock(&td->io_u_lock);
1513         td->verify_thread_exit = 1;
1514         pthread_cond_broadcast(&td->verify_cond);
1515
1516         while (td->nr_verify_threads)
1517                 pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1518
1519         pthread_mutex_unlock(&td->io_u_lock);
1520         free(td->verify_threads);
1521         td->verify_threads = NULL;
1522 }
1523
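/*
 * Pattern format callback: copies up to 8 bytes of the little-endian io_u
 * offset into the pattern buffer (truncated to the available pattern bytes),
 * e.g. for the "%o" specifier in verify_pattern.
 */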
1524 int paste_blockoff(char *buf, unsigned int len, void *priv)
1525 {
1526         struct io_u *io = priv;
1527         unsigned long long off;
1528
1529         typecheck(__typeof__(off), io->offset);
1530         off = cpu_to_le64((uint64_t)io->offset);
1531         len = min(len, (unsigned int)sizeof(off));
1532         memcpy(buf, &off, len);
1533         return 0;
1534 }
1535
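/*
 * Copy the most recently completed write offsets for this file (tracked in
 * the f->last_write_comp ring, newest at last_write_idx - 1) into the
 * verify-state snapshot, at most iodepth entries.
 */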
1536 static int __fill_file_completions(struct thread_data *td,
1537                                    struct thread_io_list *s,
1538                                    struct fio_file *f, unsigned int *index)
1539 {
1540         unsigned int comps;
1541         int i, j;
1542
1543         if (!f->last_write_comp)
1544                 return 0;
1545
1546         if (td->io_blocks[DDIR_WRITE] < td->o.iodepth)
1547                 comps = td->io_blocks[DDIR_WRITE];
1548         else
1549                 comps = td->o.iodepth;
1550
1551         j = f->last_write_idx - 1;
1552         for (i = 0; i < comps; i++) {
1553                 if (j == -1)
1554                         j = td->o.iodepth - 1;
1555                 s->comps[*index].fileno = __cpu_to_le64(f->fileno);
1556                 s->comps[*index].offset = cpu_to_le64(f->last_write_comp[j]);
1557                 (*index)++;
1558                 j--;
1559         }
1560
1561         return comps;
1562 }
1563
1564 static int fill_file_completions(struct thread_data *td,
1565                                  struct thread_io_list *s, unsigned int *index)
1566 {
1567         struct fio_file *f;
1568         unsigned int i;
1569         int comps = 0;
1570
1571         for_each_file(td, f, i)
1572                 comps += __fill_file_completions(td, s, f, index);
1573
1574         return comps;
1575 }
1576
1577 struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
1578 {
1579         struct all_io_list *rep;
1580         size_t depth;
1581         void *next;
1582         int nr;
1583
1584         compiletime_assert(sizeof(struct all_io_list) == 8, "all_io_list");
1585
1586         /*
1587          * Calculate reply space needed. We need one 'io_state' per thread,
1588          * and the size will vary depending on depth.
1589          */
1590         depth = 0;
1591         nr = 0;
1592         for_each_td(td) {
1593                 if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
1594                         continue;
1595                 td->stop_io = 1;
1596                 td->flags |= TD_F_VSTATE_SAVED;
1597                 depth += (td->o.iodepth * td->o.nr_files);
1598                 nr++;
1599         } end_for_each();
1600
1601         if (!nr)
1602                 return NULL;
1603
1604         *sz = sizeof(*rep);
1605         *sz += nr * sizeof(struct thread_io_list);
1606         *sz += depth * sizeof(struct file_comp);
1607         rep = calloc(1, *sz);
1608
1609         rep->threads = cpu_to_le64((uint64_t) nr);
1610
1611         next = &rep->state[0];
1612         for_each_td(td) {
1613                 struct thread_io_list *s = next;
1614                 unsigned int comps, index = 0;
1615
1616                 if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
1617                         continue;
1618
1619                 comps = fill_file_completions(td, s, &index);
1620
1621                 s->no_comps = cpu_to_le64((uint64_t) comps);
1622                 s->depth = cpu_to_le64((uint64_t) td->o.iodepth);
1623                 s->nofiles = cpu_to_le64((uint64_t) td->o.nr_files);
1624                 s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
1625                 s->index = cpu_to_le64((uint64_t) __td_index);
1626                 if (td->random_state.use64) {
1627                         s->rand.state64.s[0] = cpu_to_le64(td->random_state.state64.s1);
1628                         s->rand.state64.s[1] = cpu_to_le64(td->random_state.state64.s2);
1629                         s->rand.state64.s[2] = cpu_to_le64(td->random_state.state64.s3);
1630                         s->rand.state64.s[3] = cpu_to_le64(td->random_state.state64.s4);
1631                         s->rand.state64.s[4] = cpu_to_le64(td->random_state.state64.s5);
1632                         s->rand.state64.s[5] = 0;
1633                         s->rand.use64 = cpu_to_le64((uint64_t)1);
1634                 } else {
1635                         s->rand.state32.s[0] = cpu_to_le32(td->random_state.state32.s1);
1636                         s->rand.state32.s[1] = cpu_to_le32(td->random_state.state32.s2);
1637                         s->rand.state32.s[2] = cpu_to_le32(td->random_state.state32.s3);
1638                         s->rand.state32.s[3] = 0;
1639                         s->rand.use64 = 0;
1640                 }
1641                 snprintf((char *) s->name, sizeof(s->name), "%s", td->o.name);
1642                 next = io_list_next(s);
1643         } end_for_each();
1644
1645         return rep;
1646 }
1647
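     /*
      * Open the verify state file for the given job name, prefix and thread
      * number, either for writing (created/truncated, O_SYNC) or read-only.
      * Returns the file descriptor, or -1 on error.
      */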
1648 static int open_state_file(const char *name, const char *prefix, int num,
1649                            int for_write)
1650 {
1651         char out[PATH_MAX];
1652         int flags;
1653         int fd;
1654
1655         if (for_write)
1656                 flags = O_CREAT | O_TRUNC | O_WRONLY | O_SYNC;
1657         else
1658                 flags = O_RDONLY;
1659
1660 #ifdef _WIN32
1661         flags |= O_BINARY;
1662 #endif
1663
1664         verify_state_gen_name(out, sizeof(out), name, prefix, num);
1665
1666         fd = open(out, flags, 0644);
1667         if (fd == -1) {
1668                 perror("fio: open state file");
1669                 log_err("fio: state file: %s (for_write=%d)\n", out, for_write);
1670                 return -1;
1671         }
1672
1673         return fd;
1674 }
1675
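     /*
      * Persist one thread's verify state: write a small header holding the
      * version, payload size and crc32c of the payload, followed by the
      * thread_io_list itself.
      */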
1676 static int write_thread_list_state(struct thread_io_list *s,
1677                                    const char *prefix)
1678 {
1679         struct verify_state_hdr hdr;
1680         uint64_t crc;
1681         ssize_t ret;
1682         int fd;
1683
1684         fd = open_state_file((const char *) s->name, prefix, s->index, 1);
1685         if (fd == -1)
1686                 return 1;
1687
1688         crc = fio_crc32c((void *)s, thread_io_list_sz(s));
1689
1690         hdr.version = cpu_to_le64((uint64_t) VSTATE_HDR_VERSION);
1691         hdr.size = cpu_to_le64((uint64_t) thread_io_list_sz(s));
1692         hdr.crc = cpu_to_le64(crc);
1693         ret = write(fd, &hdr, sizeof(hdr));
1694         if (ret != sizeof(hdr))
1695                 goto write_fail;
1696
1697         ret = write(fd, s, thread_io_list_sz(s));
1698         if (ret != thread_io_list_sz(s)) {
1699 write_fail:
1700                 if (ret < 0)
1701                         perror("fio: write state file");
1702                 log_err("fio: failed to write state file\n");
1703                 ret = 1;
1704         } else
1705                 ret = 0;
1706
1707         close(fd);
1708         return ret;
1709 }
1710
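     /*
      * Write each per-thread state block in the snapshot to its own file.
      */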
1711 void __verify_save_state(struct all_io_list *state, const char *prefix)
1712 {
1713         struct thread_io_list *s = &state->state[0];
1714         unsigned int i;
1715
1716         for (i = 0; i < le64_to_cpu(state->threads); i++) {
1717                 write_thread_list_state(s, prefix);
1718                 s = io_list_next(s);
1719         }
1720 }
1721
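     /*
      * Snapshot and persist verify state for the threads selected by 'mask',
      * using "local" (optionally under aux_path) as the state file prefix.
      */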
1722 void verify_save_state(int mask)
1723 {
1724         struct all_io_list *state;
1725         size_t sz;
1726
1727         state = get_all_io_list(mask, &sz);
1728         if (state) {
1729                 char prefix[PATH_MAX];
1730
1731                 if (aux_path)
1732                         snprintf(prefix, sizeof(prefix), "%s%clocal", aux_path, FIO_OS_PATH_SEPARATOR);
1733                 else
1734                         strcpy(prefix, "local");
1735
1736                 __verify_save_state(state, prefix);
1737                 free(state);
1738         }
1739 }
1740
1741 void verify_free_state(struct thread_data *td)
1742 {
1743         if (td->vstate)
1744                 free(td->vstate);
1745 }
1746
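     /*
      * Convert a loaded state block from little-endian to host byte order
      * and attach it to the thread as td->vstate.
      */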
1747 void verify_assign_state(struct thread_data *td, void *p)
1748 {
1749         struct thread_io_list *s = p;
1750         int i;
1751
1752         s->no_comps = le64_to_cpu(s->no_comps);
1753         s->depth = le64_to_cpu(s->depth);
1754         s->nofiles = le64_to_cpu(s->nofiles);
1755         s->numberio = le64_to_cpu(s->numberio);
1756         s->rand.use64 = le64_to_cpu(s->rand.use64);
1757
1758         if (s->rand.use64) {
1759                 for (i = 0; i < 6; i++)
1760                         s->rand.state64.s[i] = le64_to_cpu(s->rand.state64.s[i]);
1761         } else {
1762                 for (i = 0; i < 4; i++)
1763                         s->rand.state32.s[i] = le32_to_cpu(s->rand.state32.s[i]);
1764         }
1765
1766         for (i = 0; i < s->no_comps; i++) {
1767                 s->comps[i].fileno = le64_to_cpu(s->comps[i].fileno);
1768                 s->comps[i].offset = le64_to_cpu(s->comps[i].offset);
1769         }
1770
1771         td->vstate = p;
1772 }
1773
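     /*
      * Validate a verify state header against its payload: convert the
      * header to host byte order, then check the version and the payload
      * crc32c. Returns 0 if the state is valid.
      */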
1774 int verify_state_hdr(struct verify_state_hdr *hdr, struct thread_io_list *s)
1775 {
1776         uint64_t crc;
1777
1778         hdr->version = le64_to_cpu(hdr->version);
1779         hdr->size = le64_to_cpu(hdr->size);
1780         hdr->crc = le64_to_cpu(hdr->crc);
1781
1782         if (hdr->version != VSTATE_HDR_VERSION)
1783                 return 1;
1784
1785         crc = fio_crc32c((void *)s, hdr->size);
1786         if (crc != hdr->crc)
1787                 return 1;
1788
1789         return 0;
1790 }
1791
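     /*
      * Load a previously saved verify state file for this thread: validate
      * the header version, read the payload, check its crc32c and hand the
      * result to verify_assign_state(). Returns 0 on success, or if
      * verify_state isn't enabled for the job.
      */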
1792 int verify_load_state(struct thread_data *td, const char *prefix)
1793 {
1794         struct verify_state_hdr hdr;
1795         void *s = NULL;
1796         uint64_t crc;
1797         ssize_t ret;
1798         int fd;
1799
1800         if (!td->o.verify_state)
1801                 return 0;
1802
1803         fd = open_state_file(td->o.name, prefix, td->thread_number - 1, 0);
1804         if (fd == -1)
1805                 return 1;
1806
1807         ret = read(fd, &hdr, sizeof(hdr));
1808         if (ret != sizeof(hdr)) {
1809                 if (ret < 0)
1810                         td_verror(td, errno, "read verify state hdr");
1811                 log_err("fio: failed reading verify state header\n");
1812                 goto err;
1813         }
1814
1815         hdr.version = le64_to_cpu(hdr.version);
1816         hdr.size = le64_to_cpu(hdr.size);
1817         hdr.crc = le64_to_cpu(hdr.crc);
1818
1819         if (hdr.version != VSTATE_HDR_VERSION) {
1820                 log_err("fio: unsupported (%u) version in verify state header\n",
1821                                 (unsigned int) hdr.version);
1822                 goto err;
1823         }
1824
1825         s = malloc(hdr.size);
1826         ret = read(fd, s, hdr.size);
1827         if (ret != hdr.size) {
1828                 if (ret < 0)
1829                         td_verror(td, errno, "read verify state");
1830                 log_err("fio: failed reading verify state\n");
1831                 goto err;
1832         }
1833
1834         crc = fio_crc32c(s, hdr.size);
1835         if (crc != hdr.crc) {
1836                 log_err("fio: verify state is corrupt\n");
1837                 goto err;
1838         }
1839
1840         close(fd);
1841
1842         verify_assign_state(td, s);
1843         return 0;
1844 err:
1845         if (s)
1846                 free(s);
1847         close(fd);
1848         return 1;
1849 }
1850
1851 /*
1852  * Use the loaded verify state to know when to stop doing verification
1853  */
1854 int verify_state_should_stop(struct thread_data *td, struct io_u *io_u)
1855 {
1856         struct thread_io_list *s = td->vstate;
1857         struct fio_file *f = io_u->file;
1858         int i;
1859
1860         if (!s || !f)
1861                 return 0;
1862
1863         /*
1864          * If we're not yet into the window of the last 'depth' issued IOs,
1865          * continue. If fewer IOs were issued than the depth, always check.
1866          */
1867         if ((td->io_blocks[DDIR_READ] < s->depth ||
1868             s->numberio - td->io_blocks[DDIR_READ] > s->depth) &&
1869             s->numberio > s->depth)
1870                 return 0;
1871
1872         /*
1873          * We're in the window of having to check if this io was
1874          * completed or not. If the IO was seen as completed, then
1875          * let's verify it.
1876          */
1877         for (i = 0; i < s->no_comps; i++) {
1878                 if (s->comps[i].fileno != f->fileno)
1879                         continue;
1880                 if (io_u->verify_offset == s->comps[i].offset)
1881                         return 0;
1882         }
1883
1884         /*
1885          * Not found, we have to stop
1886          */
1887         return 1;
1888 }