dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit);
- return (f->file_map[idx] & (1 << bit)) == 0;
+ return (f->file_map[idx] & (1UL << bit)) == 0;
}
/*
busy_check = !(io_u->flags & IO_U_F_BUSY_OK);
while (nr_blocks) {
- unsigned int this_blocks, mask;
unsigned int idx, bit;
+ unsigned long mask, this_blocks;
/*
* If we have a mixed random workload, we may
do {
if (this_blocks == BLOCKS_PER_MAP)
- mask = -1U;
+ mask = -1UL;
else
- mask = ((1U << this_blocks) - 1) << bit;
+ mask = ((1UL << this_blocks) - 1) << bit;
if (!(f->file_map[idx] & mask))
break;
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, unsigned long long *b)
{
- unsigned long long min_bs = td->o.rw_min_bs;
+ unsigned long long block, min_bs = td->o.rw_min_bs, lastb;
int i;
+ lastb = last_block(td, f, ddir);
+ if (!lastb)
+ return 1;
+
i = f->last_free_lookup;
- *b = (i * BLOCKS_PER_MAP);
- while ((*b) * min_bs < f->real_file_size &&
- (*b) * min_bs < f->io_size) {
- if (f->file_map[i] != (unsigned int) -1) {
- *b += ffz(f->file_map[i]);
- if (*b > last_block(td, f, ddir))
+ block = i * BLOCKS_PER_MAP;
+ while (block * min_bs < f->real_file_size &&
+ block * min_bs < f->io_size) {
+ if (f->file_map[i] != -1UL) {
+ block += ffz(f->file_map[i]);
+ if (block > lastb)
break;
f->last_free_lookup = i;
+ *b = block;
return 0;
}
- *b += BLOCKS_PER_MAP;
+ block += BLOCKS_PER_MAP;
i++;
}
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, unsigned long long *b)
{
- unsigned long long r;
+ unsigned long long r, lastb;
int loops = 5;
+ lastb = last_block(td, f, ddir);
+ if (!lastb)
+ return 1;
+
+ if (f->failed_rands >= 200)
+ goto ffz;
+
do {
r = os_random_long(&td->random_state);
dprint(FD_RANDOM, "off rand %llu\n", r);
- *b = (last_block(td, f, ddir) - 1)
- * (r / ((unsigned long long) OS_RAND_MAX + 1.0));
+ *b = (lastb - 1) * (r / ((unsigned long long) OS_RAND_MAX + 1.0));
/*
* if we are not maintaining a random map, we are done.
*/
if (!file_randommap(td, f))
- return 0;
+ goto ret_good;
/*
* calculate map offset and check if it's free
*/
if (random_map_free(f, *b))
- return 0;
+ goto ret_good;
dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
*b);
} while (--loops);
+ if (!f->failed_rands++)
+ f->last_free_lookup = 0;
+
/*
 * we get here, if we didn't succeed in looking up a block. generate
* a random start offset into the filemap, and find the first free
f->last_free_lookup = (f->num_maps - 1) *
(r / (OS_RAND_MAX + 1.0));
if (!get_next_free_block(td, f, ddir, b))
- return 0;
+ goto ret;
r = os_random_long(&td->random_state);
} while (--loops);
* that didn't work either, try exhaustive search from the start
*/
f->last_free_lookup = 0;
+ffz:
+ if (!get_next_free_block(td, f, ddir, b))
+ return 0;
+ f->last_free_lookup = 0;
return get_next_free_block(td, f, ddir, b);
+ret_good:
+ f->failed_rands = 0;
+ret:
+ return 0;
}
static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
return td->rwmix_ddir;
}
+static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
+{
+ io_u->ddir = get_rw_ddir(td);
+
+ if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
+ td->o.barrier_blocks &&
+ !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
+ td->io_issues[DDIR_WRITE])
+ io_u->flags |= IO_U_F_BARRIER;
+}
+
void put_file_log(struct thread_data *td, struct fio_file *f)
{
int ret = put_file(td, f);
if (td->io_ops->flags & FIO_NOIO)
goto out;
- io_u->ddir = get_rw_ddir(td);
+ set_rw_ddir(td, io_u);
/*
* fsync() or fdatasync() or trim etc, we are done
static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
- int index = 0;
+ int idx = 0;
switch (nr) {
default:
- index = 6;
+ idx = 6;
break;
case 33 ... 64:
- index = 5;
+ idx = 5;
break;
case 17 ... 32:
- index = 4;
+ idx = 4;
break;
case 9 ... 16:
- index = 3;
+ idx = 3;
break;
case 5 ... 8:
- index = 2;
+ idx = 2;
break;
case 1 ... 4:
- index = 1;
+ idx = 1;
case 0:
break;
}
- map[index]++;
+ map[idx]++;
}
void io_u_mark_submit(struct thread_data *td, unsigned int nr)
void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
- int index = 0;
+ int idx = 0;
switch (td->cur_depth) {
default:
- index = 6;
+ idx = 6;
break;
case 32 ... 63:
- index = 5;
+ idx = 5;
break;
case 16 ... 31:
- index = 4;
+ idx = 4;
break;
case 8 ... 15:
- index = 3;
+ idx = 3;
break;
case 4 ... 7:
- index = 2;
+ idx = 2;
break;
case 2 ... 3:
- index = 1;
+ idx = 1;
case 1:
break;
}
- td->ts.io_u_map[index] += nr;
+ td->ts.io_u_map[idx] += nr;
}
static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
- int index = 0;
+ int idx = 0;
assert(usec < 1000);
switch (usec) {
case 750 ... 999:
- index = 9;
+ idx = 9;
break;
case 500 ... 749:
- index = 8;
+ idx = 8;
break;
case 250 ... 499:
- index = 7;
+ idx = 7;
break;
case 100 ... 249:
- index = 6;
+ idx = 6;
break;
case 50 ... 99:
- index = 5;
+ idx = 5;
break;
case 20 ... 49:
- index = 4;
+ idx = 4;
break;
case 10 ... 19:
- index = 3;
+ idx = 3;
break;
case 4 ... 9:
- index = 2;
+ idx = 2;
break;
case 2 ... 3:
- index = 1;
+ idx = 1;
case 0 ... 1:
break;
}
- assert(index < FIO_IO_U_LAT_U_NR);
- td->ts.io_u_lat_u[index]++;
+ assert(idx < FIO_IO_U_LAT_U_NR);
+ td->ts.io_u_lat_u[idx]++;
}
static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
- int index = 0;
+ int idx = 0;
switch (msec) {
default:
- index = 11;
+ idx = 11;
break;
case 1000 ... 1999:
- index = 10;
+ idx = 10;
break;
case 750 ... 999:
- index = 9;
+ idx = 9;
break;
case 500 ... 749:
- index = 8;
+ idx = 8;
break;
case 250 ... 499:
- index = 7;
+ idx = 7;
break;
case 100 ... 249:
- index = 6;
+ idx = 6;
break;
case 50 ... 99:
- index = 5;
+ idx = 5;
break;
case 20 ... 49:
- index = 4;
+ idx = 4;
break;
case 10 ... 19:
- index = 3;
+ idx = 3;
break;
case 4 ... 9:
- index = 2;
+ idx = 2;
break;
case 2 ... 3:
- index = 1;
+ idx = 1;
case 0 ... 1:
break;
}
- assert(index < FIO_IO_U_LAT_M_NR);
- td->ts.io_u_lat_m[index]++;
+ assert(idx < FIO_IO_U_LAT_M_NR);
+ td->ts.io_u_lat_m[idx]++;
}
static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
if (io_u) {
assert(io_u->flags & IO_U_F_FREE);
io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
- io_u->flags &= ~IO_U_F_TRIMMED;
+ io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);
io_u->error = 0;
flist_del(&io_u->list);
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
unsigned int max_bs)
{
+ io_u->buf_filled_len = 0;
+
if (!td->o.zero_buffers)
fill_random_buf(io_u->buf, max_bs);
else