#include <pthread.h>
#include <libgen.h>
+#include "arch/arch.h"
#include "fio.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/hweight.h"
#include "lib/pattern.h"
+#include "oslib/asprintf.h"
#include "crc/md5.h"
#include "crc/crc64.h"
#include "crc/sha512.h"
#include "crc/sha1.h"
#include "crc/xxhash.h"
+#include "crc/sha3.h"
static void populate_hdr(struct thread_data *td, struct io_u *io_u,
struct verify_header *hdr, unsigned int header_num,
unsigned int header_len);
-static void fill_hdr(struct thread_data *td, struct io_u *io_u,
- struct verify_header *hdr, unsigned int header_num,
- unsigned int header_len, uint64_t rand_seed);
static void __fill_hdr(struct thread_data *td, struct io_u *io_u,
struct verify_header *hdr, unsigned int header_num,
unsigned int header_len, uint64_t rand_seed);
(void)cpy_pattern(td->o.buffer_pattern, td->o.buffer_pattern_bytes, p, len);
}
-void __fill_buffer(struct thread_options *o, unsigned long seed, void *p,
-		   unsigned int len)
+/*
+ * Fill 'len' bytes at 'p' deterministically from a 64-bit seed, honouring
+ * the configured compressibility percentage and buffer pattern. Now static:
+ * callers go through fill_verify_pattern(), which owns seed selection.
+ */
+static void __fill_buffer(struct thread_options *o, uint64_t seed, void *p,
+			  unsigned int len)
{
	__fill_random_buf_percentage(seed, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
}
-unsigned long fill_buffer(struct thread_data *td, void *p, unsigned int len)
-{
- struct frand_state *fs = &td->verify_state;
- struct thread_options *o = &td->o;
-
- return fill_random_buf_percentage(fs, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
-}
-
void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
- struct io_u *io_u, unsigned long seed, int use_seed)
+ struct io_u *io_u, uint64_t seed, int use_seed)
{
struct thread_options *o = &td->o;
if (!o->verify_pattern_bytes) {
dprint(FD_VERIFY, "fill random bytes len=%u\n", len);
- if (use_seed)
- __fill_buffer(o, seed, p, len);
- else
- io_u->rand_seed = fill_buffer(td, p, len);
+ if (!use_seed) {
+ seed = __rand(&td->verify_state);
+ if (sizeof(int) != sizeof(long *))
+ seed *= (unsigned long)__rand(&td->verify_state);
+ }
+ io_u->rand_seed = seed;
+ __fill_buffer(o, seed, p, len);
return;
}
{
unsigned int hdr_inc;
+ /*
+ * If we use bs_unaligned, buflen can be larger than the verify
+ * interval (which just defaults to the smallest blocksize possible).
+ */
hdr_inc = io_u->buflen;
- if (td->o.verify_interval && td->o.verify_interval <= io_u->buflen)
+ if (td->o.verify_interval && td->o.verify_interval <= io_u->buflen &&
+ !td->o.bs_unaligned)
hdr_inc = td->o.verify_interval;
return hdr_inc;
}
static void fill_pattern_headers(struct thread_data *td, struct io_u *io_u,
- unsigned long seed, int use_seed)
+ uint64_t seed, int use_seed)
{
unsigned int hdr_inc, header_num;
struct verify_header *hdr;
case VERIFY_SHA512:
len = sizeof(struct vhdr_sha512);
break;
+ case VERIFY_SHA3_224:
+ len = sizeof(struct vhdr_sha3_224);
+ break;
+ case VERIFY_SHA3_256:
+ len = sizeof(struct vhdr_sha3_256);
+ break;
+ case VERIFY_SHA3_384:
+ len = sizeof(struct vhdr_sha3_384);
+ break;
+ case VERIFY_SHA3_512:
+ len = sizeof(struct vhdr_sha3_512);
+ break;
case VERIFY_XXHASH:
len = sizeof(struct vhdr_xxhash);
break;
};
#define DUMP_BUF_SZ 255
-static int dump_buf_warned;
static void dump_buf(char *buf, unsigned int len, unsigned long long offset,
const char *type, struct fio_file *f)
{
- char *ptr, fname[DUMP_BUF_SZ];
- size_t buf_left = DUMP_BUF_SZ;
+ char *ptr, *fname;
+ char sep[2] = { FIO_OS_PATH_SEPARATOR, 0 };
int ret, fd;
ptr = strdup(f->file_name);
- memset(fname, 0, sizeof(fname));
- if (aux_path)
- sprintf(fname, "%s%s", aux_path, FIO_OS_PATH_SEPARATOR);
-
- strncpy(fname + strlen(fname), basename(ptr), buf_left - 1);
-
- buf_left -= strlen(fname);
- if (buf_left <= 0) {
- if (!dump_buf_warned) {
- log_err("fio: verify failure dump buffer too small\n");
- dump_buf_warned = 1;
- }
- free(ptr);
- return;
+ if (asprintf(&fname, "%s%s%s.%llu.%s", aux_path ? : "",
+ aux_path ? sep : "", basename(ptr), offset, type) < 0) {
+ if (!fio_did_warn(FIO_WARN_VERIFY_BUF))
+ log_err("fio: not enough memory for dump buffer filename\n");
+ goto free_ptr;
}
- snprintf(fname + strlen(fname), buf_left, ".%llu.%s", offset, type);
-
fd = open(fname, O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (fd < 0) {
perror("open verify buf file");
- return;
+ goto free_fname;
}
while (len) {
close(fd);
log_err(" %s data dumped as %s\n", type, fname);
+
+free_fname:
+ free(fname);
+
+free_ptr:
free(ptr);
}
*/
hdr_offset = vc->hdr_num * hdr->len;
- dump_buf(io_u->buf + hdr_offset, hdr->len, io_u->offset + hdr_offset,
+ dump_buf(io_u->buf + hdr_offset, hdr->len, io_u->verify_offset + hdr_offset,
"received", vc->io_u->file);
/*
fill_pattern_headers(td, &dummy, hdr->rand_seed, 1);
- dump_buf(buf + hdr_offset, hdr->len, io_u->offset + hdr_offset,
+ dump_buf(buf + hdr_offset, hdr->len, io_u->verify_offset + hdr_offset,
"expected", vc->io_u->file);
free(buf);
}
{
unsigned long long offset;
- offset = vc->io_u->offset;
+ offset = vc->io_u->verify_offset;
offset += vc->hdr_num * hdr->len;
- log_err("%.8s: verify failed at file %s offset %llu, length %u\n",
- vc->name, vc->io_u->file->file_name, offset, hdr->len);
+ log_err("%.8s: verify failed at file %s offset %llu, length %u"
+ " (requested block: offset=%llu, length=%llu, flags=%x)\n",
+ vc->name, vc->io_u->file->file_name, offset, hdr->len,
+ vc->io_u->verify_offset, vc->io_u->buflen, vc->io_u->flags);
if (vc->good_crc && vc->bad_crc) {
log_err(" Expected CRC: ");
(void)paste_format_inplace(pattern, pattern_size,
td->o.verify_fmt, td->o.verify_fmt_sz, io_u);
- buf = (void *) hdr + header_size;
+ buf = (char *) hdr + header_size;
len = get_hdr_inc(td, io_u) - header_size;
mod = (get_hdr_inc(td, io_u) * vc->hdr_num + header_size) % pattern_size;
(unsigned char)pattern[mod],
bits);
log_err("fio: bad pattern block offset %u\n", i);
- dump_verify_buffers(hdr, vc);
+ vc->name = "pattern";
+ log_verify_failure(hdr, vc);
return EILSEQ;
}
mod++;
return EILSEQ;
}
+/*
+ * Common helper for the SHA3 verify variants: recompute the digest over the
+ * payload (header length minus header size) into 'sha3_ctx' and compare it
+ * against the digest 'sha' stored in the verify header.
+ *
+ * Returns 0 on match; on mismatch fills in the vcont failure fields, logs
+ * the failure and returns EILSEQ.
+ */
+static int verify_io_u_sha3(struct verify_header *hdr, struct vcont *vc,
+			    struct fio_sha3_ctx *sha3_ctx, uint8_t *sha,
+			    unsigned int sha_size, const char *name)
+{
+	void *p = io_u_verify_off(hdr, vc);
+
+	dprint(FD_VERIFY, "%s verify io_u %p, len %u\n", name, vc->io_u, hdr->len);
+
+	fio_sha3_update(sha3_ctx, p, hdr->len - hdr_size(vc->td, hdr));
+	fio_sha3_final(sha3_ctx);
+
+	if (!memcmp(sha, sha3_ctx->sha, sha_size))
+		return 0;
+
+	vc->name = name;
+	vc->good_crc = sha;
+	vc->bad_crc = sha3_ctx->sha;
+	vc->crc_len = sha_size;
+	log_verify_failure(hdr, vc);
+	return EILSEQ;
+}
+
+/* Verify a block against the SHA3-224 digest stored in its header. */
+static int verify_io_u_sha3_224(struct verify_header *hdr, struct vcont *vc)
+{
+	struct vhdr_sha3_224 *vh = hdr_priv(hdr);
+	uint8_t sha[SHA3_224_DIGEST_SIZE];
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = sha,
+	};
+
+	fio_sha3_224_init(&sha3_ctx);
+
+	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
+				SHA3_224_DIGEST_SIZE, "sha3-224");
+}
+
+/* Verify a block against the SHA3-256 digest stored in its header. */
+static int verify_io_u_sha3_256(struct verify_header *hdr, struct vcont *vc)
+{
+	struct vhdr_sha3_256 *vh = hdr_priv(hdr);
+	uint8_t sha[SHA3_256_DIGEST_SIZE];
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = sha,
+	};
+
+	fio_sha3_256_init(&sha3_ctx);
+
+	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
+				SHA3_256_DIGEST_SIZE, "sha3-256");
+}
+
+/* Verify a block against the SHA3-384 digest stored in its header. */
+static int verify_io_u_sha3_384(struct verify_header *hdr, struct vcont *vc)
+{
+	struct vhdr_sha3_384 *vh = hdr_priv(hdr);
+	uint8_t sha[SHA3_384_DIGEST_SIZE];
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = sha,
+	};
+
+	fio_sha3_384_init(&sha3_ctx);
+
+	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
+				SHA3_384_DIGEST_SIZE, "sha3-384");
+}
+
+/* Verify a block against the SHA3-512 digest stored in its header. */
+static int verify_io_u_sha3_512(struct verify_header *hdr, struct vcont *vc)
+{
+	struct vhdr_sha3_512 *vh = hdr_priv(hdr);
+	uint8_t sha[SHA3_512_DIGEST_SIZE];
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = sha,
+	};
+
+	fio_sha3_512_init(&sha3_ctx);
+
+	return verify_io_u_sha3(hdr, vc, &sha3_ctx, vh->sha,
+				SHA3_512_DIGEST_SIZE, "sha3-512");
+}
+
static int verify_io_u_sha512(struct verify_header *hdr, struct vcont *vc)
{
void *p = io_u_verify_off(hdr, vc);
if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
td->cur_depth--;
- io_u_clear(io_u, IO_U_F_IN_CUR_DEPTH);
+ io_u_clear(td, io_u, IO_U_F_IN_CUR_DEPTH);
}
flist_add_tail(&io_u->verify_list, &td->verify_list);
*io_u_ptr = NULL;
- pthread_mutex_unlock(&td->io_u_lock);
pthread_cond_signal(&td->verify_cond);
+ pthread_mutex_unlock(&td->io_u_lock);
return 0;
}
mem_is_zero_slow(io_u->buf, io_u->buflen, &offset);
- log_err("trim: verify failed at file %s offset %llu, length %lu"
+ log_err("trim: verify failed at file %s offset %llu, length %llu"
", block offset %lu\n",
- io_u->file->file_name, io_u->offset, io_u->buflen,
+ io_u->file->file_name, io_u->verify_offset, io_u->buflen,
(unsigned long) offset);
return EILSEQ;
}
hdr->rand_seed, io_u->rand_seed);
goto err;
}
- if (hdr->offset != io_u->offset + hdr_num * td->o.verify_interval) {
+ if (hdr->offset != io_u->verify_offset + hdr_num * td->o.verify_interval) {
log_err("verify: bad header offset %"PRIu64
", wanted %llu",
- hdr->offset, io_u->offset);
+ hdr->offset, io_u->verify_offset);
goto err;
}
* For read-only workloads, the program cannot be certain of the
* last numberio written to a block. Checking of numberio will be
* done only for workloads that write data. For verify_only,
- * numberio will be checked in the last iteration when the correct
- * state of numberio, that would have been written to each block
- * in a previous run of fio, has been reached.
+ * numberio check is skipped.
*/
- if ((td_write(td) || td_rw(td)) && (td_min_bs(td) == td_max_bs(td)) &&
+ if (td_write(td) && (td_min_bs(td) == td_max_bs(td)) &&
!td->o.time_based)
- if (!td->o.verify_only || td->o.loops == 0)
+ if (!td->o.verify_only)
if (hdr->numberio != io_u->numberio) {
log_err("verify: bad header numberio %"PRIu16
", wanted %"PRIu16,
return 0;
err:
- log_err(" at file %s offset %llu, length %u\n",
+ log_err(" at file %s offset %llu, length %u"
+ " (requested block: offset=%llu, length=%llu)\n",
io_u->file->file_name,
- io_u->offset + hdr_num * hdr_len, hdr_len);
+ io_u->verify_offset + hdr_num * hdr_len, hdr_len,
+ io_u->verify_offset, io_u->buflen);
if (td->o.verify_dump)
- dump_buf(p, hdr_len, io_u->offset + hdr_num * hdr_len,
+ dump_buf(p, hdr_len, io_u->verify_offset + hdr_num * hdr_len,
"hdr_fail", io_u->file);
return EILSEQ;
* If the IO engine is faking IO (like null), then just pretend
* we verified everything.
*/
- if (td->io_ops->flags & FIO_FAKEIO)
+ if (td_ioengine_flagged(td, FIO_FAKEIO))
return 0;
if (io_u->flags & IO_U_F_TRIMMED) {
hdr = p;
/*
- * Make rand_seed check pass when have verifysort or
- * verify_backlog.
+ * Make rand_seed check pass when have verify_backlog or
+ * zone reset frequency for zonemode=zbd.
*/
- if (td->o.verifysort || (td->flags & TD_F_VER_BACKLOG))
+ if (!td_rw(td) || (td->flags & TD_F_VER_BACKLOG) ||
+ td->o.zrf.u.f)
io_u->rand_seed = hdr->rand_seed;
if (td->o.verify != VERIFY_PATTERN_NO_HDR) {
case VERIFY_SHA512:
ret = verify_io_u_sha512(hdr, &vc);
break;
+ case VERIFY_SHA3_224:
+ ret = verify_io_u_sha3_224(hdr, &vc);
+ break;
+ case VERIFY_SHA3_256:
+ ret = verify_io_u_sha3_256(hdr, &vc);
+ break;
+ case VERIFY_SHA3_384:
+ ret = verify_io_u_sha3_384(hdr, &vc);
+ break;
+ case VERIFY_SHA3_512:
+ ret = verify_io_u_sha3_512(hdr, &vc);
+ break;
case VERIFY_XXHASH:
ret = verify_io_u_xxhash(hdr, &vc);
break;
vh->hash = XXH32_digest(state);
}
+/*
+ * Common helper for the SHA3 fill variants: hash 'len' bytes at 'p' and
+ * finalize the digest into the buffer the context was initialized with.
+ */
+static void fill_sha3(struct fio_sha3_ctx *sha3_ctx, void *p, unsigned int len)
+{
+	fio_sha3_update(sha3_ctx, p, len);
+	fio_sha3_final(sha3_ctx);
+}
+
+/* Store the SHA3-224 digest of the payload into the verify header. */
+static void fill_sha3_224(struct verify_header *hdr, void *p, unsigned int len)
+{
+	struct vhdr_sha3_224 *vh = hdr_priv(hdr);
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = vh->sha,
+	};
+
+	fio_sha3_224_init(&sha3_ctx);
+	fill_sha3(&sha3_ctx, p, len);
+}
+
+/* Store the SHA3-256 digest of the payload into the verify header. */
+static void fill_sha3_256(struct verify_header *hdr, void *p, unsigned int len)
+{
+	struct vhdr_sha3_256 *vh = hdr_priv(hdr);
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = vh->sha,
+	};
+
+	fio_sha3_256_init(&sha3_ctx);
+	fill_sha3(&sha3_ctx, p, len);
+}
+
+/* Store the SHA3-384 digest of the payload into the verify header. */
+static void fill_sha3_384(struct verify_header *hdr, void *p, unsigned int len)
+{
+	struct vhdr_sha3_384 *vh = hdr_priv(hdr);
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = vh->sha,
+	};
+
+	fio_sha3_384_init(&sha3_ctx);
+	fill_sha3(&sha3_ctx, p, len);
+}
+
+/* Store the SHA3-512 digest of the payload into the verify header. */
+static void fill_sha3_512(struct verify_header *hdr, void *p, unsigned int len)
+{
+	struct vhdr_sha3_512 *vh = hdr_priv(hdr);
+	struct fio_sha3_ctx sha3_ctx = {
+		.sha = vh->sha,
+	};
+
+	fio_sha3_512_init(&sha3_ctx);
+	fill_sha3(&sha3_ctx, p, len);
+}
+
static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
{
struct vhdr_sha512 *vh = hdr_priv(hdr);
hdr->verify_type = td->o.verify;
hdr->len = header_len;
hdr->rand_seed = rand_seed;
- hdr->offset = io_u->offset + header_num * td->o.verify_interval;
+ hdr->offset = io_u->verify_offset + header_num * td->o.verify_interval;
hdr->time_sec = io_u->start_time.tv_sec;
- hdr->time_usec = io_u->start_time.tv_usec;
+ hdr->time_nsec = io_u->start_time.tv_nsec;
hdr->thread = td->thread_number;
hdr->numberio = io_u->numberio;
hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
struct verify_header *hdr, unsigned int header_num,
unsigned int header_len, uint64_t rand_seed)
{
-
if (td->o.verify != VERIFY_PATTERN_NO_HDR)
__fill_hdr(td, io_u, hdr, header_num, header_len, rand_seed);
}
unsigned int header_len)
{
unsigned int data_len;
- void *data, *p;
+ void *data;
+ char *p;
- p = (void *) hdr;
+ p = (char *) hdr;
fill_hdr(td, io_u, hdr, header_num, header_len, io_u->rand_seed);
+ if (header_len <= hdr_size(td, hdr)) {
+ td_verror(td, EINVAL, "Blocksize too small");
+ return;
+ }
data_len = header_len - hdr_size(td, hdr);
data = p + hdr_size(td, hdr);
io_u, hdr->len);
fill_sha512(hdr, data, data_len);
break;
+ case VERIFY_SHA3_224:
+ dprint(FD_VERIFY, "fill sha3-224 io_u %p, len %u\n",
+ io_u, hdr->len);
+ fill_sha3_224(hdr, data, data_len);
+ break;
+ case VERIFY_SHA3_256:
+ dprint(FD_VERIFY, "fill sha3-256 io_u %p, len %u\n",
+ io_u, hdr->len);
+ fill_sha3_256(hdr, data, data_len);
+ break;
+ case VERIFY_SHA3_384:
+ dprint(FD_VERIFY, "fill sha3-384 io_u %p, len %u\n",
+ io_u, hdr->len);
+ fill_sha3_384(hdr, data, data_len);
+ break;
+ case VERIFY_SHA3_512:
+ dprint(FD_VERIFY, "fill sha3-512 io_u %p, len %u\n",
+ io_u, hdr->len);
+ fill_sha3_512(hdr, data, data_len);
+ break;
case VERIFY_XXHASH:
dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
io_u, hdr->len);
if (td->o.verify == VERIFY_NULL)
return;
- io_u->numberio = td->io_issues[io_u->ddir];
-
fill_pattern_headers(td, io_u, 0, 0);
}
return 0;
if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
- struct rb_node *n = rb_first(&td->io_hist_tree);
+ struct fio_rb_node *n = rb_first(&td->io_hist_tree);
ipo = rb_entry(n, struct io_piece, rb_node);
/*
* Ensure that the associated IO has completed
*/
- read_barrier();
- if (ipo->flags & IP_F_IN_FLIGHT)
+ if (atomic_load_acquire(&ipo->flags) & IP_F_IN_FLIGHT)
goto nothing;
rb_erase(n, &td->io_hist_tree);
/*
* Ensure that the associated IO has completed
*/
- read_barrier();
- if (ipo->flags & IP_F_IN_FLIGHT)
+ if (atomic_load_acquire(&ipo->flags) & IP_F_IN_FLIGHT)
goto nothing;
flist_del(&ipo->list);
td->io_hist_len--;
io_u->offset = ipo->offset;
+ io_u->verify_offset = ipo->offset;
io_u->buflen = ipo->len;
io_u->numberio = ipo->numberio;
io_u->file = ipo->file;
- io_u_set(io_u, IO_U_F_VER_LIST);
+ io_u_set(td, io_u, IO_U_F_VER_LIST);
if (ipo->flags & IP_F_TRIMMED)
- io_u_set(io_u, IO_U_F_TRIMMED);
+ io_u_set(td, io_u, IO_U_F_TRIMMED);
if (!fio_file_open(io_u->file)) {
int r = td_io_open_file(td, io_u->file);
{
if (td->o.verify == VERIFY_CRC32C_INTEL ||
td->o.verify == VERIFY_CRC32C) {
+ crc32c_arm64_probe();
crc32c_intel_probe();
}
}
ret = pthread_cond_wait(&td->verify_cond,
&td->io_u_lock);
if (ret) {
- pthread_mutex_unlock(&td->io_u_lock);
break;
}
}
io_u = flist_first_entry(&list, struct io_u, verify_list);
flist_del_init(&io_u->verify_list);
- io_u_set(io_u, IO_U_F_NO_FILE_PUT);
+ io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);
ret = verify_io_u(td, &io_u);
put_io_u(td, io_u);
done:
pthread_mutex_lock(&td->io_u_lock);
td->nr_verify_threads--;
+ pthread_cond_signal(&td->free_cond);
pthread_mutex_unlock(&td->io_u_lock);
- pthread_cond_signal(&td->free_cond);
return NULL;
}
if (i != td->o.verify_async) {
log_err("fio: only %d verify threads started, exiting\n", i);
+
+ pthread_mutex_lock(&td->io_u_lock);
td->verify_thread_exit = 1;
- write_barrier();
pthread_cond_broadcast(&td->verify_cond);
+ pthread_mutex_unlock(&td->io_u_lock);
+
return 1;
}
void verify_async_exit(struct thread_data *td)
{
+ pthread_mutex_lock(&td->io_u_lock);
td->verify_thread_exit = 1;
- write_barrier();
pthread_cond_broadcast(&td->verify_cond);
- pthread_mutex_lock(&td->io_u_lock);
-
while (td->nr_verify_threads)
pthread_cond_wait(&td->free_cond, &td->io_u_lock);
struct io_u *io = priv;
unsigned long long off;
- typecheck(typeof(off), io->offset);
+ typecheck(__typeof__(off), io->offset);
off = cpu_to_le64((uint64_t)io->offset);
len = min(len, (unsigned int)sizeof(off));
memcpy(buf, &off, len);
struct all_io_list *get_all_io_list(int save_mask, size_t *sz)
{
struct all_io_list *rep;
- struct thread_data *td;
size_t depth;
void *next;
- int i, nr;
+ int nr;
compiletime_assert(sizeof(struct all_io_list) == 8, "all_io_list");
*/
depth = 0;
nr = 0;
- for_each_td(td, i) {
- if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
+ for_each_td(td) {
+ if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
continue;
td->stop_io = 1;
td->flags |= TD_F_VSTATE_SAVED;
depth += (td->o.iodepth * td->o.nr_files);
nr++;
- }
+ } end_for_each();
if (!nr)
return NULL;
*sz = sizeof(*rep);
*sz += nr * sizeof(struct thread_io_list);
*sz += depth * sizeof(struct file_comp);
- rep = malloc(*sz);
- memset(rep, 0, *sz);
+ rep = calloc(1, *sz);
rep->threads = cpu_to_le64((uint64_t) nr);
next = &rep->state[0];
- for_each_td(td, i) {
+ for_each_td(td) {
struct thread_io_list *s = next;
unsigned int comps, index = 0;
- if (save_mask != IO_LIST_ALL && (i + 1) != save_mask)
+ if (save_mask != IO_LIST_ALL && (__td_index + 1) != save_mask)
continue;
comps = fill_file_completions(td, s, &index);
s->depth = cpu_to_le64((uint64_t) td->o.iodepth);
s->nofiles = cpu_to_le64((uint64_t) td->o.nr_files);
s->numberio = cpu_to_le64((uint64_t) td->io_issues[DDIR_WRITE]);
- s->index = cpu_to_le64((uint64_t) i);
+ s->index = cpu_to_le64((uint64_t) __td_index);
if (td->random_state.use64) {
s->rand.state64.s[0] = cpu_to_le64(td->random_state.state64.s1);
s->rand.state64.s[1] = cpu_to_le64(td->random_state.state64.s2);
s->rand.state32.s[3] = 0;
s->rand.use64 = 0;
}
- s->name[sizeof(s->name) - 1] = '\0';
- strncpy((char *) s->name, td->o.name, sizeof(s->name) - 1);
+ snprintf((char *) s->name, sizeof(s->name), "%s", td->o.name);
next = io_list_next(s);
- }
+ } end_for_each();
return rep;
}
char prefix[PATH_MAX];
if (aux_path)
- sprintf(prefix, "%s%slocal", aux_path, FIO_OS_PATH_SEPARATOR);
+ sprintf(prefix, "%s%clocal", aux_path, FIO_OS_PATH_SEPARATOR);
else
strcpy(prefix, "local");
for (i = 0; i < s->no_comps; i++) {
if (s->comps[i].fileno != f->fileno)
continue;
- if (io_u->offset == s->comps[i].offset)
+ if (io_u->verify_offset == s->comps[i].offset)
return 0;
}