#include "crc/sha512.h"
#include "crc/sha1.h"
#include "crc/xxhash.h"
+#include "crc/sha3.h"
static void populate_hdr(struct thread_data *td, struct io_u *io_u,
struct verify_header *hdr, unsigned int header_num,
(void)cpy_pattern(td->o.buffer_pattern, td->o.buffer_pattern_bytes, p, len);
}
-void __fill_buffer(struct thread_options *o, unsigned long seed, void *p,
- unsigned int len)
+static void __fill_buffer(struct thread_options *o, unsigned long seed, void *p,
+ unsigned int len)
{
__fill_random_buf_percentage(seed, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
}
-unsigned long fill_buffer(struct thread_data *td, void *p, unsigned int len)
+static unsigned long fill_buffer(struct thread_data *td, void *p,
+ unsigned int len)
{
struct frand_state *fs = &td->verify_state;
struct thread_options *o = &td->o;
case VERIFY_SHA512:
len = sizeof(struct vhdr_sha512);
break;
+ case VERIFY_SHA3_224:
+ len = sizeof(struct vhdr_sha3_224);
+ break;
+ case VERIFY_SHA3_256:
+ len = sizeof(struct vhdr_sha3_256);
+ break;
+ case VERIFY_SHA3_384:
+ len = sizeof(struct vhdr_sha3_384);
+ break;
+ case VERIFY_SHA3_512:
+ len = sizeof(struct vhdr_sha3_512);
+ break;
case VERIFY_XXHASH:
len = sizeof(struct vhdr_xxhash);
break;
fd = open(fname, O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (fd < 0) {
perror("open verify buf file");
+ free(ptr);
return;
}
(void)paste_format_inplace(pattern, pattern_size,
td->o.verify_fmt, td->o.verify_fmt_sz, io_u);
- buf = (void *) hdr + header_size;
+ buf = (char *) hdr + header_size;
len = get_hdr_inc(td, io_u) - header_size;
mod = (get_hdr_inc(td, io_u) * vc->hdr_num + header_size) % pattern_size;
(unsigned char)pattern[mod],
bits);
log_err("fio: bad pattern block offset %u\n", i);
- dump_verify_buffers(hdr, vc);
+ vc->name = "pattern";
+ log_verify_failure(hdr, vc);
return EILSEQ;
}
mod++;
return EILSEQ;
}
+/*
+ * Common SHA-3 verify path shared by the 224/256/384/512 variants.
+ * Digests the data payload of one verify block (everything past the
+ * verify header) and compares it against the digest recorded in the
+ * header's private area.
+ *
+ * On mismatch, records the expected (good) and computed (bad) digests
+ * in the vcont and logs the failure. Returns 0 on match, EILSEQ on
+ * digest mismatch.
+ */
+static int verify_io_u_sha3(struct verify_header *hdr, struct vcont *vc,
+			    struct fio_sha3_ctx *sha3_ctx, uint8_t *sha,
+			    unsigned int sha_size, const char *name)
+{
+	void *p = io_u_verify_off(hdr, vc);
+
+	dprint(FD_VERIFY, "%s verify io_u %p, len %u\n", name, vc->io_u, hdr->len);
+
+	/* hash only the payload: total block length minus the header */
+	fio_sha3_update(sha3_ctx, p, hdr->len - hdr_size(vc->td, hdr));
+	fio_sha3_final(sha3_ctx);
+
+	if (!memcmp(sha, sha3_ctx->sha, sha_size))
+		return 0;
+
+	vc->name = name;
+	vc->good_crc = sha;
+	vc->bad_crc = sha3_ctx->sha;
+	vc->crc_len = sha_size;
+	log_verify_failure(hdr, vc);
+	return EILSEQ;
+}
+
+/* Verify one block against the SHA3-224 digest stored in its header. */
+static int verify_io_u_sha3_224(struct verify_header *hdr, struct vcont *vc)
+{
+	struct vhdr_sha3_224 *vh = hdr_priv(hdr);
+	uint8_t sha[SHA3_224_DIGEST_SIZE];
+	/* ctx writes the computed digest into the local 'sha' buffer... */
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = sha,
+	};
+
+	fio_sha3_224_init(&sha3_ctx);
+
+	/* ...which verify_io_u_sha3() compares against vh->sha from the header */
+	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
+				SHA3_224_DIGEST_SIZE, "sha3-224");
+}
+
+/* Verify one block against the SHA3-256 digest stored in its header. */
+static int verify_io_u_sha3_256(struct verify_header *hdr, struct vcont *vc)
+{
+	struct vhdr_sha3_256 *vh = hdr_priv(hdr);
+	uint8_t sha[SHA3_256_DIGEST_SIZE];
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = sha,
+	};
+
+	fio_sha3_256_init(&sha3_ctx);
+
+	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
+				SHA3_256_DIGEST_SIZE, "sha3-256");
+}
+
+/* Verify one block against the SHA3-384 digest stored in its header. */
+static int verify_io_u_sha3_384(struct verify_header *hdr, struct vcont *vc)
+{
+	struct vhdr_sha3_384 *vh = hdr_priv(hdr);
+	uint8_t sha[SHA3_384_DIGEST_SIZE];
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = sha,
+	};
+
+	fio_sha3_384_init(&sha3_ctx);
+
+	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
+				SHA3_384_DIGEST_SIZE, "sha3-384");
+}
+
+/* Verify one block against the SHA3-512 digest stored in its header. */
+static int verify_io_u_sha3_512(struct verify_header *hdr, struct vcont *vc)
+{
+	struct vhdr_sha3_512 *vh = hdr_priv(hdr);
+	uint8_t sha[SHA3_512_DIGEST_SIZE];
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = sha,
+	};
+
+	fio_sha3_512_init(&sha3_ctx);
+
+	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
+				SHA3_512_DIGEST_SIZE, "sha3-512");
+}
+
static int verify_io_u_sha512(struct verify_header *hdr, struct vcont *vc)
{
void *p = io_u_verify_off(hdr, vc);
if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
td->cur_depth--;
- io_u_clear(io_u, IO_U_F_IN_CUR_DEPTH);
+ io_u_clear(td, io_u, IO_U_F_IN_CUR_DEPTH);
}
flist_add_tail(&io_u->verify_list, &td->verify_list);
*io_u_ptr = NULL;
* state of numberio, that would have been written to each block
* in a previous run of fio, has been reached.
*/
- if ((td_write(td) || td_rw(td)) && (td_min_bs(td) == td_max_bs(td)) &&
+ if (td_write(td) && (td_min_bs(td) == td_max_bs(td)) &&
!td->o.time_based)
if (!td->o.verify_only || td->o.loops == 0)
if (hdr->numberio != io_u->numberio) {
log_err(" at file %s offset %llu, length %u\n",
io_u->file->file_name,
io_u->offset + hdr_num * hdr_len, hdr_len);
+
+ if (td->o.verify_dump)
+ dump_buf(p, hdr_len, io_u->offset + hdr_num * hdr_len,
+ "hdr_fail", io_u->file);
+
return EILSEQ;
}
* If the IO engine is faking IO (like null), then just pretend
* we verified everything.
*/
- if (td->io_ops->flags & FIO_FAKEIO)
+ if (td_ioengine_flagged(td, FIO_FAKEIO))
return 0;
if (io_u->flags & IO_U_F_TRIMMED) {
case VERIFY_SHA512:
ret = verify_io_u_sha512(hdr, &vc);
break;
+ case VERIFY_SHA3_224:
+ ret = verify_io_u_sha3_224(hdr, &vc);
+ break;
+ case VERIFY_SHA3_256:
+ ret = verify_io_u_sha3_256(hdr, &vc);
+ break;
+ case VERIFY_SHA3_384:
+ ret = verify_io_u_sha3_384(hdr, &vc);
+ break;
+ case VERIFY_SHA3_512:
+ ret = verify_io_u_sha3_512(hdr, &vc);
+ break;
case VERIFY_XXHASH:
ret = verify_io_u_xxhash(hdr, &vc);
break;
vh->hash = XXH32_digest(state);
}
+/*
+ * Common SHA-3 fill helper: run one update+final pass over 'len' bytes
+ * at 'p'. The digest lands wherever sha3_ctx->sha points (the caller
+ * aims it at the header's private digest field before calling).
+ */
+static void fill_sha3(struct fio_sha3_ctx *sha3_ctx, void *p, unsigned int len)
+{
+	fio_sha3_update(sha3_ctx, p, len);
+	fio_sha3_final(sha3_ctx);
+}
+
+/* Compute the SHA3-224 digest of the payload directly into the header. */
+static void fill_sha3_224(struct verify_header *hdr, void *p, unsigned int len)
+{
+	struct vhdr_sha3_224 *vh = hdr_priv(hdr);
+	/* digest is written straight into vh->sha, no copy needed */
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = vh->sha,
+	};
+
+	fio_sha3_224_init(&sha3_ctx);
+	fill_sha3(&sha3_ctx, p, len);
+}
+
+/* Compute the SHA3-256 digest of the payload directly into the header. */
+static void fill_sha3_256(struct verify_header *hdr, void *p, unsigned int len)
+{
+	struct vhdr_sha3_256 *vh = hdr_priv(hdr);
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = vh->sha,
+	};
+
+	fio_sha3_256_init(&sha3_ctx);
+	fill_sha3(&sha3_ctx, p, len);
+}
+
+/* Compute the SHA3-384 digest of the payload directly into the header. */
+static void fill_sha3_384(struct verify_header *hdr, void *p, unsigned int len)
+{
+	struct vhdr_sha3_384 *vh = hdr_priv(hdr);
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = vh->sha,
+	};
+
+	fio_sha3_384_init(&sha3_ctx);
+	fill_sha3(&sha3_ctx, p, len);
+}
+
+/* Compute the SHA3-512 digest of the payload directly into the header. */
+static void fill_sha3_512(struct verify_header *hdr, void *p, unsigned int len)
+{
+	struct vhdr_sha3_512 *vh = hdr_priv(hdr);
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = vh->sha,
+	};
+
+	fio_sha3_512_init(&sha3_ctx);
+	fill_sha3(&sha3_ctx, p, len);
+}
+
static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
{
struct vhdr_sha512 *vh = hdr_priv(hdr);
hdr->rand_seed = rand_seed;
hdr->offset = io_u->offset + header_num * td->o.verify_interval;
hdr->time_sec = io_u->start_time.tv_sec;
- hdr->time_usec = io_u->start_time.tv_usec;
+ hdr->time_usec = io_u->start_time.tv_nsec / 1000;
hdr->thread = td->thread_number;
hdr->numberio = io_u->numberio;
hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
unsigned int header_len)
{
unsigned int data_len;
- void *data, *p;
+ void *data;
+ char *p;
- p = (void *) hdr;
+ p = (char *) hdr;
fill_hdr(td, io_u, hdr, header_num, header_len, io_u->rand_seed);
io_u, hdr->len);
fill_sha512(hdr, data, data_len);
break;
+ case VERIFY_SHA3_224:
+ dprint(FD_VERIFY, "fill sha3-224 io_u %p, len %u\n",
+ io_u, hdr->len);
+ fill_sha3_224(hdr, data, data_len);
+ break;
+ case VERIFY_SHA3_256:
+ dprint(FD_VERIFY, "fill sha3-256 io_u %p, len %u\n",
+ io_u, hdr->len);
+ fill_sha3_256(hdr, data, data_len);
+ break;
+ case VERIFY_SHA3_384:
+ dprint(FD_VERIFY, "fill sha3-384 io_u %p, len %u\n",
+ io_u, hdr->len);
+ fill_sha3_384(hdr, data, data_len);
+ break;
+ case VERIFY_SHA3_512:
+ dprint(FD_VERIFY, "fill sha3-512 io_u %p, len %u\n",
+ io_u, hdr->len);
+ fill_sha3_512(hdr, data, data_len);
+ break;
case VERIFY_XXHASH:
dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
io_u, hdr->len);
io_u->buflen = ipo->len;
io_u->numberio = ipo->numberio;
io_u->file = ipo->file;
- io_u_set(io_u, IO_U_F_VER_LIST);
+ io_u_set(td, io_u, IO_U_F_VER_LIST);
if (ipo->flags & IP_F_TRIMMED)
- io_u_set(io_u, IO_U_F_TRIMMED);
+ io_u_set(td, io_u, IO_U_F_TRIMMED);
if (!fio_file_open(io_u->file)) {
int r = td_io_open_file(td, io_u->file);
{
if (td->o.verify == VERIFY_CRC32C_INTEL ||
td->o.verify == VERIFY_CRC32C) {
+ crc32c_arm64_probe();
crc32c_intel_probe();
}
}
io_u = flist_first_entry(&list, struct io_u, verify_list);
flist_del_init(&io_u->verify_list);
- io_u_set(io_u, IO_U_F_NO_FILE_PUT);
+ io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
ret = verify_io_u(td, &io_u);
put_io_u(td, io_u);
pthread_attr_t attr;
pthread_attr_init(&attr);
- pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);
+ pthread_attr_setstacksize(&attr, 2 * PTHREAD_STACK_MIN);
td->verify_thread_exit = 0;
return 0;
}
+/*
+ * Record, for one file, the most recent completed-write offsets into the
+ * verify-state list 's', starting at s->comps[*index] and advancing *index.
+ *
+ * f->last_write_comp is a ring of size td->o.iodepth with f->last_write_idx
+ * as the next write slot; we walk it backwards (newest first), wrapping at
+ * -1. At most min(io_blocks[WRITE], iodepth) entries exist.
+ *
+ * Returns the number of completions recorded (0 if the file has no
+ * completion ring allocated).
+ */
+static int __fill_file_completions(struct thread_data *td,
+				   struct thread_io_list *s,
+				   struct fio_file *f, unsigned int *index)
+{
+	unsigned int comps, i;
+	int j;
+
+	if (!f->last_write_comp)
+		return 0;
+
+	/* ring may not be full yet early in the run */
+	if (td->io_blocks[DDIR_WRITE] < td->o.iodepth)
+		comps = td->io_blocks[DDIR_WRITE];
+	else
+		comps = td->o.iodepth;
+
+	j = f->last_write_idx - 1;
+	for (i = 0; i < comps; i++) {
+		if (j == -1)
+			j = td->o.iodepth - 1;
+		/*
+		 * Use fio's portable cpu_to_le64() for both fields; the
+		 * kernel-style __cpu_to_le64() used elsewhere is not
+		 * guaranteed to exist on non-Linux builds and the rest of
+		 * this file consistently uses cpu_to_le64().
+		 */
+		s->comps[*index].fileno = cpu_to_le64(f->fileno);
+		s->comps[*index].offset = cpu_to_le64(f->last_write_comp[j]);
+		(*index)++;
+		j--;
+	}
+
+	return comps;
+}
+
+/*
+ * Gather last-write completion offsets across all of the thread's files
+ * into the verify-state list 's'. '*index' is the running slot counter
+ * shared across files (advanced by the per-file helper).
+ *
+ * Returns the total number of completions recorded.
+ */
+static int fill_file_completions(struct thread_data *td,
+				 struct thread_io_list *s, unsigned int *index)
+{
+	struct fio_file *f;
+	unsigned int i;
+	int comps = 0;
+
+	for_each_file(td, f, i)
+		comps += __fill_file_completions(td, s, f, index);
+
+	return comps;
+}
+
struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
{
struct all_io_list *rep;
continue;
td->stop_io = 1;
td->flags |= TD_F_VSTATE_SAVED;
- depth += td->o.iodepth;
+ depth += (td->o.iodepth * td->o.nr_files);
nr++;
}
*sz = sizeof(*rep);
*sz += nr * sizeof(struct thread_io_list);
- *sz += depth * sizeof(uint64_t);
+ *sz += depth * sizeof(struct file_comp);
rep = malloc(*sz);
+ memset(rep, 0, *sz);
rep->threads = cpu_to_le64((uint64_t) nr);
next = &rep->state[0];
for_each_td(td, i) {
struct thread_io_list *s = next;
- unsigned int comps;
+ unsigned int comps, index = 0;
if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
continue;
- if (td->last_write_comp) {
- int j, k;
-
- if (td->io_blocks[DDIR_WRITE] < td->o.iodepth)
- comps = td->io_blocks[DDIR_WRITE];
- else
- comps = td->o.iodepth;
-
- k = td->last_write_idx - 1;
- for (j = 0; j < comps; j++) {
- if (k == -1)
- k = td->o.iodepth - 1;
- s->offsets[j] = cpu_to_le64(td->last_write_comp[k]);
- k--;
- }
- } else
- comps = 0;
+ comps = fill_file_completions(td, s, &index);
s->no_comps = cpu_to_le64((uint64_t) comps);
s->depth = cpu_to_le64((uint64_t) td->o.iodepth);
+ s->nofiles = cpu_to_le64((uint64_t) td->o.nr_files);
s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
s->index = cpu_to_le64((uint64_t) i);
if (td->random_state.use64) {
static int open_state_file(const char *name, const char *prefix, int num,
int for_write)
{
- char out[64];
+ char out[PATH_MAX];
int flags;
int fd;
fd = open(out, flags, 0644);
if (fd == -1) {
perror("fio: open state file");
+ log_err("fio: state file: %s (for_write=%d)\n", out, for_write);
return -1;
}
free(td->vstate);
}
-static struct thread_io_list *convert_v1_list(struct thread_io_list_v1 *s)
-{
- struct thread_io_list *til;
- int i;
-
- til = malloc(__thread_io_list_sz(s->no_comps));
- til->no_comps = s->no_comps;
- til->depth = s->depth;
- til->numberio = s->numberio;
- til->index = s->index;
- memcpy(til->name, s->name, sizeof(til->name));
-
- til->rand.use64 = 0;
- for (i = 0; i < 4; i++)
- til->rand.state32.s[i] = s->rand.s[i];
-
- for (i = 0; i < s->no_comps; i++)
- til->offsets[i] = s->offsets[i];
-
- return til;
-}
-
-void verify_convert_assign_state(struct thread_data *td, void *p, int version)
+void verify_assign_state(struct thread_data *td, void *p)
{
- struct thread_io_list *til;
+ struct thread_io_list *s = p;
int i;
- if (version == 1) {
- struct thread_io_list_v1 *s = p;
+ s->no_comps = le64_to_cpu(s->no_comps);
+ s->depth = le32_to_cpu(s->depth);
+ s->nofiles = le32_to_cpu(s->nofiles);
+ s->numberio = le64_to_cpu(s->numberio);
+ s->rand.use64 = le64_to_cpu(s->rand.use64);
- s->no_comps = le64_to_cpu(s->no_comps);
- s->depth = le64_to_cpu(s->depth);
- s->numberio = le64_to_cpu(s->numberio);
- for (i = 0; i < 4; i++)
- s->rand.s[i] = le32_to_cpu(s->rand.s[i]);
- for (i = 0; i < s->no_comps; i++)
- s->offsets[i] = le64_to_cpu(s->offsets[i]);
-
- til = convert_v1_list(s);
- free(s);
+ if (s->rand.use64) {
+ for (i = 0; i < 6; i++)
+ s->rand.state64.s[i] = le64_to_cpu(s->rand.state64.s[i]);
} else {
- struct thread_io_list *s = p;
-
- s->no_comps = le64_to_cpu(s->no_comps);
- s->depth = le64_to_cpu(s->depth);
- s->numberio = le64_to_cpu(s->numberio);
- s->rand.use64 = le64_to_cpu(s->rand.use64);
-
- if (s->rand.use64) {
- for (i = 0; i < 6; i++)
- s->rand.state64.s[i] = le64_to_cpu(s->rand.state64.s[i]);
- } else {
- for (i = 0; i < 4; i++)
- s->rand.state32.s[i] = le32_to_cpu(s->rand.state32.s[i]);
- }
- for (i = 0; i < s->no_comps; i++)
- s->offsets[i] = le64_to_cpu(s->offsets[i]);
+ for (i = 0; i < 4; i++)
+ s->rand.state32.s[i] = le32_to_cpu(s->rand.state32.s[i]);
+ }
- til = p;
+ for (i = 0; i < s->no_comps; i++) {
+ s->comps[i].fileno = le64_to_cpu(s->comps[i].fileno);
+ s->comps[i].offset = le64_to_cpu(s->comps[i].offset);
}
- td->vstate = til;
+ td->vstate = p;
}
-int verify_state_hdr(struct verify_state_hdr *hdr, struct thread_io_list *s,
- int *version)
+int verify_state_hdr(struct verify_state_hdr *hdr, struct thread_io_list *s)
{
uint64_t crc;
hdr->size = le64_to_cpu(hdr->size);
hdr->crc = le64_to_cpu(hdr->crc);
- if (hdr->version != VSTATE_HDR_VERSION ||
- hdr->version != VSTATE_HDR_VERSION_V1)
+ if (hdr->version != VSTATE_HDR_VERSION)
return 1;
crc = fio_crc32c((void *)s, hdr->size);
if (crc != hdr->crc)
return 1;
- *version = hdr->version;
return 0;
}
hdr.size = le64_to_cpu(hdr.size);
hdr.crc = le64_to_cpu(hdr.crc);
- if (hdr.version != VSTATE_HDR_VERSION &&
- hdr.version != VSTATE_HDR_VERSION_V1) {
- log_err("fio: bad version in verify state header\n");
+ if (hdr.version != VSTATE_HDR_VERSION) {
+ log_err("fio: unsupported (%d) version in verify state header\n",
+ (unsigned int) hdr.version);
goto err;
}
close(fd);
- verify_convert_assign_state(td, s, hdr.version);
+ verify_assign_state(td, s);
return 0;
err:
if (s)
int verify_state_should_stop(struct thread_data *td, struct io_u *io_u)
{
struct thread_io_list *s = td->vstate;
+ struct fio_file *f = io_u->file;
int i;
- if (!s)
+ if (!s || !f)
return 0;
/*
* completed or not. If the IO was seen as completed, then
* lets verify it.
*/
- for (i = 0; i < s->no_comps; i++)
- if (io_u->offset == s->offsets[i])
+ for (i = 0; i < s->no_comps; i++) {
+ if (s->comps[i].fileno != f->fileno)
+ continue;
+ if (io_u->offset == s->comps[i].offset)
return 0;
+ }
/*
* Not found, we have to stop