{
struct zone_split_index *zsi;
uint64_t lastb, send, stotal;
- static int warned;
unsigned int v;
lastb = last_block(td, f, ddir);
* Should never happen
*/
if (send == -1U) {
- if (!warned) {
+ if (!fio_did_warn(FIO_WARN_ZONED_BUG))
log_err("fio: bug in zoned generation\n");
- warned = 1;
- }
goto bail;
} else if (send > lastb) {
/*
{
unsigned int v, send, stotal;
uint64_t offset, lastb;
- static int warned;
struct zone_split_index *zsi;
lastb = last_block(td, f, ddir);
* Should never happen
*/
if (send == -1U) {
- if (!warned) {
+ if (!fio_did_warn(FIO_WARN_ZONED_BUG))
log_err("fio: bug in zoned generation\n");
- warned = 1;
- }
goto bail;
}
if (f->last_pos[ddir] < f->real_file_size) {
uint64_t pos;
- if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) {
+ /*
+ * Only rewind if we already hit the end
+ */
+ if (f->last_pos[ddir] == f->file_offset &&
+ f->file_offset && o->ddir_seq_add < 0) {
if (f->real_file_size > f->io_size)
f->last_pos[ddir] = f->io_size;
else
static int get_next_block(struct thread_data *td, struct io_u *io_u,
enum fio_ddir ddir, int rw_seq,
- unsigned int *is_random)
+ bool *is_random)
{
struct fio_file *f = io_u->file;
uint64_t b, offset;
if (td_random(td)) {
if (should_do_random(td, ddir)) {
ret = get_next_rand_block(td, f, ddir, &b);
- *is_random = 1;
+ *is_random = true;
} else {
- *is_random = 0;
+ *is_random = false;
io_u_set(td, io_u, IO_U_F_BUSY_OK);
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret)
ret = get_next_rand_block(td, f, ddir, &b);
}
} else {
- *is_random = 0;
+ *is_random = false;
ret = get_next_seq_offset(td, f, ddir, &offset);
}
} else {
io_u_set(td, io_u, IO_U_F_BUSY_OK);
- *is_random = 0;
+ *is_random = false;
if (td->o.rw_seq == RW_SEQ_SEQ) {
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret) {
ret = get_next_rand_block(td, f, ddir, &b);
- *is_random = 0;
+ *is_random = false;
}
} else if (td->o.rw_seq == RW_SEQ_IDENT) {
if (f->last_start[ddir] != -1ULL)
* until we find a free one. For sequential io, just return the end of
* the last io issued.
*/
-static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
- unsigned int *is_random)
+static int get_next_offset(struct thread_data *td, struct io_u *io_u,
+ bool *is_random)
{
struct fio_file *f = io_u->file;
enum fio_ddir ddir = io_u->ddir;
return 0;
}
-static int get_next_offset(struct thread_data *td, struct io_u *io_u,
- unsigned int *is_random)
-{
- if (td->flags & TD_F_PROFILE_OPS) {
- struct prof_io_ops *ops = &td->prof_io_ops;
-
- if (ops->fill_io_u_off)
- return ops->fill_io_u_off(td, io_u, is_random);
- }
-
- return __get_next_offset(td, io_u, is_random);
-}
-
static inline bool io_u_fits(struct thread_data *td, struct io_u *io_u,
unsigned int buflen)
{
return io_u->offset + buflen <= f->io_size + get_start_offset(td, f);
}
-static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
- unsigned int is_random)
+static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
+ bool is_random)
{
int ddir = io_u->ddir;
unsigned int buflen = 0;
assert(ddir_rw(ddir));
if (td->o.bs_is_seq_rand)
- ddir = is_random ? DDIR_WRITE: DDIR_READ;
+ ddir = is_random ? DDIR_WRITE : DDIR_READ;
minbs = td->o.min_bs[ddir];
maxbs = td->o.max_bs[ddir];
return buflen;
}
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
- unsigned int is_random)
-{
- if (td->flags & TD_F_PROFILE_OPS) {
- struct prof_io_ops *ops = &td->prof_io_ops;
-
- if (ops->fill_io_u_size)
- return ops->fill_io_u_size(td, io_u, is_random);
- }
-
- return __get_next_buflen(td, io_u, is_random);
-}
-
static void set_rwmix_bytes(struct thread_data *td)
{
unsigned int diff;
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
- unsigned int is_random;
+ bool is_random;
if (td_ioengine_flagged(td, FIO_NOIO))
goto out;
return 0;
}
-static void __io_u_mark_map(unsigned int *map, unsigned int nr)
+static void __io_u_mark_map(uint64_t *map, unsigned int nr)
{
int idx = 0;
static struct fio_file *get_next_file(struct thread_data *td)
{
-	if (td->flags & TD_F_PROFILE_OPS) {
-		struct prof_io_ops *ops = &td->prof_io_ops;
-
-		if (ops->get_next_file)
-			return ops->get_next_file(td);
-	}
-
+	/*
+	 * The profile-ops get_next_file hook is removed by this change;
+	 * file selection now always uses the default __get_next_file()
+	 * path. NOTE(review): wrapper could be folded into callers later.
+	 */
	return __get_next_file(td);
}
if (no_reduce && per_unit_log(td->iops_log))
add_iops_sample(td, io_u, bytes);
- }
+ } else if (ddir_sync(idx) && !td->o.disable_clat)
+ add_sync_clat_sample(&td->ts, llnsec);
if (td->ts.nr_block_infos && io_u->ddir == DDIR_TRIM) {
uint32_t *info = io_u_block_info(td, io_u);
f->last_write_idx = 0;
}
+/*
+ * Whether this I/O completion should be folded into the accounting
+ * stats: only after the ramp time has passed, and only while the job
+ * is actively running or verifying. Used to gate the
+ * account_io_completion() calls below (both sync and rw completions).
+ */
+static bool should_account(struct thread_data *td)
+{
+	return ramp_time_over(td) && (td->runstate == TD_RUNNING ||
+		td->runstate == TD_VERIFYING);
+}
+
static void io_completed(struct thread_data *td, struct io_u **io_u_ptr,
struct io_completion_data *icd)
{
}
if (ddir_sync(ddir)) {
- td->last_was_sync = 1;
+ td->last_was_sync = true;
if (f) {
f->first_write = -1ULL;
f->last_write = -1ULL;
}
+ if (should_account(td))
+ account_io_completion(td, io_u, icd, ddir, io_u->buflen);
return;
}
- td->last_was_sync = 0;
+ td->last_was_sync = false;
td->last_ddir = ddir;
if (!io_u->error && ddir_rw(ddir)) {
if (ddir == DDIR_WRITE)
file_log_write_comp(td, f, io_u->offset, bytes);
- if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
- td->runstate == TD_VERIFYING))
+ if (should_account(td))
account_io_completion(td, io_u, icd, ddir, bytes);
icd->bytes_done[ddir] += bytes;