#include <unistd.h>
-#include <fcntl.h>
#include <string.h>
-#include <signal.h>
-#include <time.h>
#include <assert.h>
#include "fio.h"
-#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
{
struct zone_split_index *zsi;
uint64_t lastb, send, stotal;
- static int warned;
unsigned int v;
lastb = last_block(td, f, ddir);
/*
 * Should never happen
 */
if (send == -1U) {
- if (!warned) {
+ if (!fio_did_warn(FIO_WARN_ZONED_BUG))
log_err("fio: bug in zoned generation\n");
- warned = 1;
- }
goto bail;
} else if (send > lastb) {
/*
{
unsigned int v, send, stotal;
uint64_t offset, lastb;
- static int warned;
struct zone_split_index *zsi;
lastb = last_block(td, f, ddir);
/*
 * Should never happen
 */
if (send == -1U) {
- if (!warned) {
+ if (!fio_did_warn(FIO_WARN_ZONED_BUG))
log_err("fio: bug in zoned generation\n");
- warned = 1;
- }
goto bail;
}
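/*
 * For reference, a warn-once helper in the spirit of fio_did_warn() might
 * look like the sketch below. This is an assumed simplification (the names
 * did_warn and WARN_ZONED_BUG are hypothetical, and no locking is shown),
 * not fio's actual implementation: each warning type owns a bit in a
 * static mask, and only the first caller to set that bit is told to log.
 */
#include <stdbool.h>

#define WARN_ZONED_BUG	(1U << 0)

static unsigned int warned_mask;

static bool did_warn(unsigned int mask)
{
	if (warned_mask & mask)
		return true;		/* already warned, stay silent */

	warned_mask |= mask;
	return false;			/* first occurrence: caller logs */
}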
if (f->last_pos[ddir] < f->real_file_size) {
uint64_t pos;
- if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) {
+ /*
+ * Only rewind if we already hit the end
+ */
+ if (f->last_pos[ddir] == f->file_offset &&
+ f->file_offset && o->ddir_seq_add < 0) {
if (f->real_file_size > f->io_size)
f->last_pos[ddir] = f->io_size;
else
static int get_next_block(struct thread_data *td, struct io_u *io_u,
enum fio_ddir ddir, int rw_seq,
- unsigned int *is_random)
+ bool *is_random)
{
struct fio_file *f = io_u->file;
uint64_t b, offset;
if (td_random(td)) {
if (should_do_random(td, ddir)) {
ret = get_next_rand_block(td, f, ddir, &b);
- *is_random = 1;
+ *is_random = true;
} else {
- *is_random = 0;
+ *is_random = false;
io_u_set(td, io_u, IO_U_F_BUSY_OK);
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret)
ret = get_next_rand_block(td, f, ddir, &b);
}
} else {
- *is_random = 0;
+ *is_random = false;
ret = get_next_seq_offset(td, f, ddir, &offset);
}
} else {
io_u_set(td, io_u, IO_U_F_BUSY_OK);
- *is_random = 0;
+ *is_random = false;
if (td->o.rw_seq == RW_SEQ_SEQ) {
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret) {
ret = get_next_rand_block(td, f, ddir, &b);
- *is_random = 0;
+ *is_random = false;
}
} else if (td->o.rw_seq == RW_SEQ_IDENT) {
if (f->last_start[ddir] != -1ULL)
/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
-static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
- unsigned int *is_random)
+static int get_next_offset(struct thread_data *td, struct io_u *io_u,
+ bool *is_random)
{
struct fio_file *f = io_u->file;
enum fio_ddir ddir = io_u->ddir;
return 0;
}
-static int get_next_offset(struct thread_data *td, struct io_u *io_u,
- unsigned int *is_random)
-{
- if (td->flags & TD_F_PROFILE_OPS) {
- struct prof_io_ops *ops = &td->prof_io_ops;
-
- if (ops->fill_io_u_off)
- return ops->fill_io_u_off(td, io_u, is_random);
- }
-
- return __get_next_offset(td, io_u, is_random);
-}
-
static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
unsigned int buflen)
{
return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}
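/*
 * A standalone restatement of the bounds check above, with illustrative
 * values (the helper name fits() is hypothetical): an io_u fits when the
 * request ends at or before the end of the io region, which begins at the
 * file's start offset.
 */
#include <assert.h>
#include <stdint.h>

static int fits(uint64_t off, unsigned int buflen, uint64_t io_size,
		uint64_t start)
{
	return off + buflen <= io_size + start;
}

int main(void)
{
	/* 8 KiB at the tail of a 1 MiB region offset by 4 KiB: fits */
	assert(fits(1044480, 8192, 1048576, 4096));
	/* 8 KiB starting at the region's end: does not fit */
	assert(!fits(1048576, 8192, 1048576, 0));
	return 0;
}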
-static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
- unsigned int is_random)
+static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
+ bool is_random)
{
int ddir = io_u->ddir;
unsigned int buflen = 0;
assert(ddir_rw(ddir));
if (td->o.bs_is_seq_rand)
- ddir = is_random ? DDIR_WRITE: DDIR_READ;
+ ddir = is_random ? DDIR_WRITE : DDIR_READ;
minbs = td->o.min_bs[ddir];
maxbs = td->o.max_bs[ddir];
return buflen;
}
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
- unsigned int is_random)
-{
- if (td->flags & TD_F_PROFILE_OPS) {
- struct prof_io_ops *ops = &td->prof_io_ops;
-
- if (ops->fill_io_u_size)
- return ops->fill_io_u_size(td, io_u, is_random);
- }
-
- return __get_next_buflen(td, io_u, is_random);
-}
-
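/*
 * A minimal sketch of how a block size between min_bs and max_bs might be
 * drawn, aligned down to min_bs. This is an assumed simplification: the
 * real get_next_buflen() above also honors bssplit weights, power-of-2
 * sizing and io_u_fits(), none of which are modeled here. pick_buflen()
 * and its uniform [0, 1) input r are hypothetical.
 */
static unsigned int pick_buflen(unsigned int minbs, unsigned int maxbs,
				double r)
{
	unsigned int buflen;

	if (minbs == maxbs)
		return minbs;

	buflen = minbs + (unsigned int)(r * (maxbs - minbs + 1));
	buflen -= buflen % minbs;	/* align down to min_bs */
	return buflen;
}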
static void set_rwmix_bytes(struct thread_data *td)
{
unsigned int diff;
assert(!(td->flags & TD_F_CHILD));
}
io_u_qpush(&td->io_u_freelist, io_u);
- td_io_u_unlock(td);
td_io_u_free_notify(td);
+ td_io_u_unlock(td);
}
void clear_io_u(struct thread_data *td, struct io_u *io_u)
}
io_u_rpush(&td->io_u_requeues, __io_u);
- td_io_u_unlock(td);
td_io_u_free_notify(td);
+ td_io_u_unlock(td);
*io_u = NULL;
}
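/*
 * The reordering above broadcasts the free notification while io_u_lock is
 * still held. A common reason for this pattern, shown as a generic sketch
 * (struct waiter and mark_done() are hypothetical, not fio code): once the
 * waiter can observe the state change, it may proceed to tear down the
 * condvar, so signaling after unlock risks touching a destroyed object.
 * Holding the lock across the broadcast closes that window.
 */
#include <pthread.h>
#include <stdbool.h>

struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void mark_done(struct waiter *w)
{
	pthread_mutex_lock(&w->lock);
	w->done = true;
	/* safe: the waiting side cannot destroy cond before we unlock */
	pthread_cond_broadcast(&w->cond);
	pthread_mutex_unlock(&w->lock);
}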
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
- unsigned int is_random;
+ bool is_random;
if (td_ioengine_flagged(td, FIO_NOIO))
goto out;
return 0;
}
-static void __io_u_mark_map(unsigned int *map, unsigned int nr)
+static void __io_u_mark_map(uint64_t *map, unsigned int nr)
{
int idx = 0;
static struct fio_file *get_next_file(struct thread_data *td)
{
- if (td->flags & TD_F_PROFILE_OPS) {
- struct prof_io_ops *ops = &td->prof_io_ops;
-
- if (ops->get_next_file)
- return ops->get_next_file(td);
- }
-
return __get_next_file(td);
}
struct io_u *__get_io_u(struct thread_data *td)
{
struct io_u *io_u = NULL;
+ int ret;
if (td->stop_io)
return NULL;
/*
 * We ran out, wait for async verify threads to finish and
 * return one
 */
assert(!(td->flags & TD_F_CHILD));
- assert(!pthread_cond_wait(&td->free_cond, &td->io_u_lock));
+ ret = pthread_cond_wait(&td->free_cond, &td->io_u_lock);
+ assert(ret == 0);
goto again;
}
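/*
 * Why the change above matters: assert() compiles to nothing under
 * -DNDEBUG, so a call wrapped inside it disappears from release builds.
 * A generic sketch (wait_for_event(), lock and cond are hypothetical):
 */
#include <assert.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void wait_for_event(void)
{
	pthread_mutex_lock(&lock);
	/* BROKEN: assert(!pthread_cond_wait(&cond, &lock)); would skip
	 * the wait entirely when NDEBUG is defined. Keep the side effect
	 * outside the assertion instead: */
	int ret = pthread_cond_wait(&cond, &lock);
	assert(ret == 0);
	/* real code should recheck its predicate in a loop, as the
	 * "goto again" above does */
	pthread_mutex_unlock(&lock);
}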
/*
* Return an io_u to be processed. Gets a buflen and offset, sets direction,
- * etc. The returned io_u is fully ready to be prepped and submitted.
+ * etc. The returned io_u is fully ready to be prepped, populated and submitted.
*/
struct io_u *get_io_u(struct thread_data *td)
{
td->o.min_bs[DDIR_WRITE],
io_u->buflen);
} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
- !(td->flags & TD_F_COMPRESS))
+ !(td->flags & TD_F_COMPRESS) &&
+ !(td->flags & TD_F_DO_VERIFY))
do_scramble = 1;
- if (td->flags & TD_F_VER_NONE) {
- populate_verify_io_u(td, io_u);
- do_scramble = 0;
- }
} else if (io_u->ddir == DDIR_READ) {
/*
* Reset the buf_filled parameters so next time if the