int nr; /* input */
int error; /* output */
- unsigned long bytes_done[DDIR_RWDIR_CNT]; /* output */
+ uint64_t bytes_done[DDIR_RWDIR_CNT]; /* output */
struct timeval time; /* output */
};
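
The bytes_done switch to uint64_t presumably guards against counter wrap on 32-bit builds, where unsigned long is only 32 bits and a per-direction byte count overflows past 4 GiB. A minimal standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned long narrow = 0xffffffffUL;    /* ULONG_MAX on ILP32 */
        uint64_t wide = 0xffffffffULL;

        narrow += 1;    /* wraps to 0 where sizeof(long) == 4 */
        wide += 1;      /* 0x100000000, still exact */

        printf("narrow=%lu wide=%llu\n", narrow, (unsigned long long) wide);
        return 0;
}
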
r = __rand(&td->__random_state);
}
- dprint(FD_RANDOM, "off rand %llu\n", r);
+ dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);
*b = (lastb - 1) * (r / ((uint64_t) rmax + 1.0));
} else {
if (random_map_free(f, *b))
goto ret;
- dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", *b);
+ dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
+ (unsigned long long) *b);
*b = axmap_next_free(f->io_axmap, *b);
if (*b == (uint64_t) -1ULL)
return 1;
}
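
The (unsigned long long) casts added around the dprint() arguments are a portability fix: uint64_t is unsigned long on LP64 systems but unsigned long long elsewhere, so passing it straight to a %llu conversion can trigger -Wformat warnings on one platform or the other. A standalone sketch of the two portable spellings (this tree uses the explicit cast; PRIu64 from inttypes.h is the equivalent macro form):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t off = 1ULL << 40;

        /* Explicit cast: %llu always matches unsigned long long. */
        printf("off rand %llu\n", (unsigned long long) off);

        /* Macro form: PRIu64 expands to the conversion matching uint64_t. */
        printf("off rand %" PRIu64 "\n", off);
        return 0;
}
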
+/*
+ * Sort the reads for a verify phase in batches of verifysort_nr, if
+ * specified.
+ */
+static inline int should_sort_io(struct thread_data *td)
+{
+ if (!td->o.verifysort_nr || !td->o.do_verify)
+ return 0;
+ if (!td_random(td))
+ return 0;
+ if (td->runstate != TD_VERIFYING)
+ return 0;
+ if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE)
+ return 0;
+
+ return 1;
+}
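
should_sort_io() gates the batching path in get_next_rand_offset() below, which queues generated offsets on td->next_rand_list and sorts them before issue. The node type and comparator are not part of this hunk; a hypothetical sketch inferred from that usage (both struct rand_off's layout and the flist_sort-style comparator are assumptions, and flist.h is taken from the surrounding tree):

/* Hypothetical node layout, inferred from the next_rand_list usage. */
struct rand_off {
        struct flist_head list;
        uint64_t off;
};

/* Hypothetical comparator: order batched offsets ascending, so the
 * verify reads get issued in roughly sequential order. */
static int flist_cmp(void *priv, struct flist_head *a, struct flist_head *b)
{
        struct rand_off *o1 = flist_entry(a, struct rand_off, list);
        struct rand_off *o2 = flist_entry(b, struct rand_off, list);

        if (o1->off < o2->off)
                return -1;

        return o1->off > o2->off;
}
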
+
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, uint64_t *b)
{
struct rand_off *r;
int i, ret = 1;
- /*
- * If sort not enabled, or not a pure random read workload without
- * any stored write metadata, just return a random offset
- */
- if (!td->o.verifysort_nr || !(ddir == DDIR_READ && td->o.do_verify &&
- td->o.verify != VERIFY_NONE && td_random(td)))
+ if (!should_sort_io(td))
return get_off_from_method(td, f, ddir, b);
if (!flist_empty(&td->next_rand_list)) {
return 0;
if (td->o.time_based) {
- fio_file_reset(f);
+ fio_file_reset(td, f);
if (!get_next_rand_offset(td, f, ddir, b))
return 0;
}
dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
- f->file_name, f->last_pos, f->real_file_size);
+ f->file_name, (unsigned long long) f->last_pos,
+ (unsigned long long) f->real_file_size);
return 1;
}
if (io_u->offset >= f->io_size) {
dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
- io_u->offset, f->io_size);
+ (unsigned long long) io_u->offset,
+ (unsigned long long) f->io_size);
return 1;
}
io_u->offset += f->file_offset;
if (io_u->offset >= f->real_file_size) {
dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
- io_u->offset, f->real_file_size);
+ (unsigned long long) io_u->offset,
+ (unsigned long long) f->real_file_size);
return 1;
}
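
The two checks fire at different stages: the first validates the generated offset against the job's window (io_size), the second validates the rebased offset, after adding file_offset, against the file itself. A standalone walk-through with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical job: a 2 GiB file, windowed to its second half. */
        uint64_t real_file_size = 2ULL << 30;
        uint64_t file_offset = 1ULL << 30;
        uint64_t io_size = real_file_size - file_offset;        /* 1 GiB */
        uint64_t offset = 512ULL << 20;  /* generated in [0, io_size) */

        if (offset >= io_size)
                return 1;               /* first check: window exceeded */

        offset += file_offset;          /* rebase into the file */
        if (offset >= real_file_size)
                return 1;               /* second check: file exceeded */

        printf("issue IO at offset %llu\n", (unsigned long long) offset);
        return 0;
}
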
* We have too much pending sleep in this direction. See if we
* should switch.
*/
- if (td_rw(td)) {
+ if (td_rw(td) && td->o.rwmix[odir]) {
/*
* Other direction does not have too much pending, switch
*/
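
The added td->o.rwmix[odir] test keeps a rate-limited mixed job from switching into a direction whose mix weight is zero (e.g. rwmixwrite=0), which would otherwise issue IO the job never asked for. A hypothetical sketch of the guarded switch; the threshold name and value are assumptions for illustration, not taken from this patch:

if (td_rw(td) && td->o.rwmix[odir]) {
        /*
         * Hypothetical: only switch if the other direction is not
         * itself backed up on pending sleep.
         */
        if (td->rate_pending_usleep[odir] < PENDING_LIMIT_USEC)
                ddir = odir;
}
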
* io's that have been actually submitted to an async engine,
* and cur_depth is meaningless for sync engines.
*/
- if (td->io_u_in_flight) {
+ while (td->io_u_in_flight) {
int fio_unused ret;
- ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
+ ret = io_u_queued_complete(td, 1, NULL);
}
fio_gettime(&t, NULL);
if (ddir_trim(ddir))
return ddir;
+
return ddir;
}
static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
- io_u->ddir = get_rw_ddir(td);
+ io_u->ddir = io_u->acct_ddir = get_rw_ddir(td);
if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
td->o.barrier_blocks &&
void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
struct io_u *__io_u = *io_u;
+ enum fio_ddir ddir = acct_ddir(__io_u);
dprint(FD_IO, "requeue %p\n", __io_u);
td_io_u_lock(td);
__io_u->flags |= IO_U_F_FREE;
- if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
- td->io_issues[__io_u->ddir]--;
+ if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
+ td->io_issues[ddir]--;
__io_u->flags &= ~IO_U_F_FLIGHT;
if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
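
requeue_io_u() now decrements io_issues[] through acct_ddir() rather than the live io_u->ddir, so an io_u whose direction gets rewritten after issue is still credited back to the direction it was accounted under (set_rw_ddir() above records it, and io_u setup resets it to -1, as seen further down). The accessor itself is not in this hunk; a sketch consistent with that -1 sentinel might look like:

/* Sketch of an accessor consistent with the usage here: prefer the
 * recorded accounting direction, falling back to the live ddir while
 * acct_ddir still holds its -1 "unset" sentinel. */
static inline enum fio_ddir acct_ddir(struct io_u *io_u)
{
        if (io_u->acct_ddir != -1)
                return io_u->acct_ddir;

        return io_u->ddir;
}
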
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
dprint(FD_IO, "io_u %p, offset too large\n", io_u);
- dprint(FD_IO, " off=%llu/%lu > %llu\n", io_u->offset,
- io_u->buflen, io_u->file->real_file_size);
+ dprint(FD_IO, " off=%llu/%lu > %llu\n",
+ (unsigned long long) io_u->offset, io_u->buflen,
+ (unsigned long long) io_u->file->real_file_size);
return 1;
}
if (td_random(td) && file_randommap(td, io_u->file))
mark_random_map(td, io_u);
- /*
- * If using a write iolog, store this entry.
- */
out:
dprint_io_u(io_u, "fill_io_u");
td->zone_bytes += io_u->buflen;
- log_io_u(td, io_u);
return 0;
}
io_u->flags &= ~IO_U_F_VER_LIST;
io_u->error = 0;
+ io_u->acct_ddir = -1;
flist_del(&io_u->list);
flist_add_tail(&io_u->list, &td->io_u_busylist);
td->cur_depth++;
if (td_write(td) && idx == DDIR_WRITE &&
td->o.do_verify &&
- td->o.verify != VERIFY_NONE)
+ td->o.verify != VERIFY_NONE &&
+ !td->o.experimental_verify)
log_io_piece(td, io_u);
icd->bytes_done[idx] += bytes;
* Complete a single io_u for the sync engines.
*/
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
- unsigned long *bytes)
+ uint64_t *bytes)
{
struct io_completion_data icd;
 * Called to complete a minimum of 'min_evts' IOs for the async engines.
*/
int io_u_queued_complete(struct thread_data *td, int min_evts,
- unsigned long *bytes)
+ uint64_t *bytes)
{
struct io_completion_data icd;
struct timespec *tvp = NULL;
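
With both completion paths converted, callers collect the per-direction byte counts in uint64_t as well, matching the struct io_completion_data change at the top of this patch. A hypothetical caller shape (the helper name and surrounding logic are assumptions):

/* Hypothetical helper: reap at least min_evts completions and fold
 * the per-direction byte counts into a 64-bit running total. */
static int reap_events(struct thread_data *td, int min_evts, uint64_t *total)
{
        uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
        int i, ret;

        ret = io_u_queued_complete(td, min_evts, bytes_done);
        if (ret < 0)
                return ret;

        for (i = 0; i < DDIR_RWDIR_CNT; i++)
                *total += bytes_done[i];

        return ret;
}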