* If we have a mixed random workload, we may
* encounter blocks we already did IO to.
*/
- if (!td->o.ddir_nr && !random_map_free(td, f, block))
+ if (td->o.ddir_nr == 1 && !random_map_free(td, f, block))
break;
idx = RAND_MAP_IDX(td, f, block);
if (get_next_rand_offset(td, f, ddir, &b))
return 1;
} else {
- if (f->last_pos >= f->real_file_size)
- return 1;
-
- b = f->last_pos / td->o.min_bs[ddir];
+ if (f->last_pos >= f->real_file_size) {
+ if (!td_random(td) || get_next_rand_offset(td, f, ddir, &b))
+ return 1;
+ } else
+ b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
}
io_u->offset = (b * td->o.min_bs[ddir]) + f->file_offset;
buflen = (buflen + td->o.min_bs[ddir] - 1) & ~(td->o.min_bs[ddir] - 1);
}
+ if (io_u->offset + buflen > io_u->file->real_file_size)
+ buflen = td->o.min_bs[ddir];
+
return buflen;
}
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
+ if (td->io_ops->flags & FIO_NOIO)
+ goto out;
+
/*
* see if it's time to sync
*/
if (!io_u->buflen)
return 1;
+ if (io_u->offset + io_u->buflen > io_u->file->real_file_size)
+ return 1;
+
/*
* mark entry before potentially trimming io_u
*/