static int get_next_free_block(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, unsigned long long *b)
{
- unsigned long long min_bs = td->o.rw_min_bs;
+ unsigned long long min_bs = td->o.rw_min_bs, lastb;
int i;

+ lastb = last_block(td, f, ddir);
+ if (!lastb)
+ return 1;
+
i = f->last_free_lookup;
*b = (i * BLOCKS_PER_MAP);
while ((*b) * min_bs < f->real_file_size &&
(*b) * min_bs < f->io_size) {
if (f->file_map[i] != (unsigned int) -1) {
*b += ffz(f->file_map[i]);
- if (*b > last_block(td, f, ddir))
+ if (*b > lastb)
break;
f->last_free_lookup = i;
return 0;
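
The first hunk caches last_block() in lastb and returns early when the data
direction has no blocks at all, instead of recomputing it on every pass. The
scan itself depends on ffz() returning the index of the first zero bit in a
map word, which is only meaningful when the word is not all ones, hence the
(unsigned int) -1 guard. A minimal standalone sketch of that bitmap-scan
technique, with a hypothetical my_ffz() standing in for fio's ffz():

#include <stdio.h>

/* hypothetical stand-in for fio's ffz(): index of the first zero bit */
static unsigned int my_ffz(unsigned int word)
{
	unsigned int i;

	for (i = 0; i < 32; i++)
		if (!(word & (1U << i)))
			return i;
	return 32;	/* all ones: callers must test against (unsigned int) -1 first */
}

int main(void)
{
	unsigned int map = 0x0000ffff;	/* blocks 0..15 in use, 16..31 free */

	if (map != (unsigned int) -1)
		printf("first free block in this word: %u\n", my_ffz(map));
	return 0;
}

The same guard is added to get_next_rand_offset():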
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, unsigned long long *b)
{
- unsigned long long r;
+ unsigned long long r, lastb;
int loops = 5;

+ lastb = last_block(td, f, ddir);
+ if (!lastb)
+ return 1;
+
do {
r = os_random_long(&td->random_state);
dprint(FD_RANDOM, "off rand %llu\n", r);
- *b = (last_block(td, f, ddir) - 1)
- * (r / ((unsigned long long) OS_RAND_MAX + 1.0));
+ *b = (lastb - 1) * (r / ((unsigned long long) OS_RAND_MAX + 1.0));
/*
* if we are not maintaining a random map, we are done.
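
The second hunk applies the same caching, and the collapsed assignment keeps
the original scaling: r / (OS_RAND_MAX + 1.0) is a double in [0, 1), so the
truncated product is always a block index below lastb. A minimal sketch of
that mapping, with libc's rand()/RAND_MAX standing in for
os_random_long()/OS_RAND_MAX and an assumed block count:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long long lastb = 1000;	/* assumed block count */
	unsigned long long r = rand();		/* 0 <= r <= RAND_MAX */
	unsigned long long b;

	/* r / (RAND_MAX + 1.0) is in [0, 1), so b is always < lastb */
	b = (lastb - 1) * (r / ((unsigned long long) RAND_MAX + 1.0));
	printf("r=%llu maps to block %llu of %llu\n", r, b, lastb);
	return 0;
}

Note that the truncated product never reaches lastb - 1, so this formula
alone never picks the very last block; when a random map is in use,
get_next_free_block() can still hand it out. The patch then wraps
get_rw_ddir() in a new helper: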
return td->rwmix_ddir;
}
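
+ /*
+ * Flag every 'barrier_blocks'-th issued write as a barrier when the
+ * ioengine advertises FIO_BARRIER support.
+ */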
+static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
+{
+ io_u->ddir = get_rw_ddir(td);
+
+ if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
+ td->o.barrier_blocks &&
+ !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
+ td->io_issues[DDIR_WRITE])
+ io_u->flags |= IO_U_F_BARRIER;
+}
+
void put_file_log(struct thread_data *td, struct fio_file *f)
{
int ret = put_file(td, f);
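
Taken together, the conditions in set_rw_ddir() flag every barrier_blocks-th
issued write, while the trailing td->io_issues[DDIR_WRITE] test keeps the
very first write (issue count 0) unflagged. A standalone sketch of that
cadence, using illustrative names and an assumed barrier_blocks value rather
than fio's structures:

#include <stdio.h>

int main(void)
{
	unsigned int barrier_blocks = 4;	/* assumed option value */
	unsigned long io_issues;

	for (io_issues = 0; io_issues < 10; io_issues++)
		if (barrier_blocks && !(io_issues % barrier_blocks) && io_issues)
			printf("write #%lu would carry IO_U_F_BARRIER\n", io_issues);
	return 0;
}

With barrier_blocks = 4 this prints writes #4 and #8. The next hunk swaps
the old direct assignment for the new helper at its call site: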
if (td->io_ops->flags & FIO_NOIO)
goto out;

- io_u->ddir = get_rw_ddir(td);
+ set_rw_ddir(td, io_u);
/*
* fsync() or fdatasync() or trim etc, we are done
if (io_u) {
assert(io_u->flags & IO_U_F_FREE);
io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
- io_u->flags &= ~IO_U_F_TRIMMED;
+ io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);
io_u->error = 0;
flist_del(&io_u->list);
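
Recycled io_u structures must not carry a stale barrier flag, so the reuse
path above clears IO_U_F_BARRIER together with IO_U_F_TRIMMED in a single
masked AND. A tiny sketch of that idiom, with made-up flag values:

#include <stdio.h>

enum {
	IO_U_F_TRIMMED	= 1 << 0,	/* illustrative bit values, not fio's */
	IO_U_F_BARRIER	= 1 << 1,
	IO_U_F_BUSY	= 1 << 2,
};

int main(void)
{
	unsigned int flags = IO_U_F_TRIMMED | IO_U_F_BARRIER | IO_U_F_BUSY;

	flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);	/* clear both at once */
	printf("remaining flags: 0x%x\n", flags);	/* 0x4 */
	return 0;
}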