* The ->io_axmap contains a map of blocks we have or have not done io
* to yet. Used to make sure we cover the entire range in a fair fashion.
*/
-static int random_map_free(struct fio_file *f, const unsigned long long block)
+static int random_map_free(struct fio_file *f, const uint64_t block)
{
return !axmap_isset(f->io_axmap, block);
}
{
unsigned int min_bs = td->o.rw_min_bs;
struct fio_file *f = io_u->file;
- unsigned long long block;
unsigned int nr_blocks;
+ uint64_t block;
- block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
+ block = (io_u->offset - f->file_offset) / (uint64_t) min_bs;
nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
if (!(io_u->flags & IO_U_F_BUSY_OK))
io_u->buflen = nr_blocks * min_bs;
}
-static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
- enum fio_ddir ddir)
+static uint64_t last_block(struct thread_data *td, struct fio_file *f,
+ enum fio_ddir ddir)
{
- unsigned long long max_blocks;
- unsigned long long max_size;
+ uint64_t max_blocks;
+ uint64_t max_size;
assert(ddir_rw(ddir));
if (td->o.zone_range)
max_size = td->o.zone_range;
- max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
+ max_blocks = max_size / (uint64_t) td->o.ba[ddir];
if (!max_blocks)
return 0;
return max_blocks;
}
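+/*
+ * One pending random offset. Batches of these are queued on
+ * td->next_rand_list so they can be sorted before being consumed.
+ */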
+struct rand_off {
+ struct flist_head list;
+ uint64_t off;
+};
+
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
- enum fio_ddir ddir, unsigned long long *b)
+ enum fio_ddir ddir, uint64_t *b)
{
- unsigned long long r;
+ uint64_t r, lastb;
- if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
- unsigned long long rmax, lastb;
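+ /*
+ * Both generators need the block count up front: tausworthe scales
+ * into it, and the LFSR passes it to lfsr_next() as the upper bound.
+ */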
+ lastb = last_block(td, f, ddir);
+ if (!lastb)
+ return 1;
- lastb = last_block(td, f, ddir);
- if (!lastb)
- return 1;
+ if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
+ uint64_t rmax;
rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);
- *b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));
+ *b = (lastb - 1) * (r / ((uint64_t) rmax + 1.0));
} else {
uint64_t off = 0;
- if (lfsr_next(&f->lfsr, &off))
+ if (lfsr_next(&f->lfsr, &off, lastb))
return 1;
*b = off;
static int __get_next_rand_offset_zipf(struct thread_data *td,
struct fio_file *f, enum fio_ddir ddir,
- unsigned long long *b)
+ uint64_t *b)
{
*b = zipf_next(&f->zipf);
return 0;
static int __get_next_rand_offset_pareto(struct thread_data *td,
struct fio_file *f, enum fio_ddir ddir,
- unsigned long long *b)
+ uint64_t *b)
{
*b = pareto_next(&f->zipf);
return 0;
}
-static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
- enum fio_ddir ddir, unsigned long long *b)
+static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
+{
+ struct rand_off *r1 = flist_entry(a, struct rand_off, list);
+ struct rand_off *r2 = flist_entry(b, struct rand_off, list);
+
+ /*
+ * Don't return the u64 difference truncated to int: that can flip
+ * the sign when the offsets differ by 2^31 blocks or more.
+ */
+ return (r1->off > r2->off) - (r1->off < r2->off);
+}
+
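+/*
+ * Pick the next offset using the distribution configured via
+ * random_distribution (uniform random, zipf, or pareto).
+ */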
+static int get_off_from_method(struct thread_data *td, struct fio_file *f,
+ enum fio_ddir ddir, uint64_t *b)
{
if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
return __get_next_rand_offset(td, f, ddir, b);
return 1;
}
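+/*
+ * Front end for random offsets. For the read phase of a verify job
+ * with verifysort_nr set, offsets are generated in batches and sorted
+ * ascending. E.g. with verifysort_nr=8, the first call queues and
+ * sorts 8 offsets and returns the smallest; the next 7 calls pop the
+ * rest in order before a fresh batch is drawn.
+ */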
+static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
+ enum fio_ddir ddir, uint64_t *b)
+{
+ struct rand_off *r;
+ int i, ret = 1;
+
+ /*
+ * If sorting isn't enabled, or this isn't the random read phase of a
+ * verify workload, just return the next offset directly.
+ */
+ if (!td->o.verifysort_nr || !(ddir == DDIR_READ && td->o.do_verify &&
+ td->o.verify != VERIFY_NONE && td_random(td)))
+ return get_off_from_method(td, f, ddir, b);
+
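+ /* Consume a previously sorted batch first, if one is queued */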
+ if (!flist_empty(&td->next_rand_list)) {
+ struct rand_off *r;
+fetch:
+ r = flist_entry(td->next_rand_list.next, struct rand_off, list);
+ flist_del(&r->list);
+ *b = r->off;
+ free(r);
+ return 0;
+ }
+
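+ /* Batch exhausted: draw verifysort_nr new offsets, then sort below */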
+ for (i = 0; i < td->o.verifysort_nr; i++) {
+ r = malloc(sizeof(*r));
+
+ ret = get_off_from_method(td, f, ddir, &r->off);
+ if (ret) {
+ free(r);
+ break;
+ }
+
+ flist_add(&r->list, &td->next_rand_list);
+ }
+
+ if (ret && !i)
+ return ret;
+
+ assert(!flist_empty(&td->next_rand_list));
+ flist_sort(NULL, &td->next_rand_list, flist_cmp);
+ goto fetch;
+}
+
static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
- enum fio_ddir ddir, unsigned long long *b)
+ enum fio_ddir ddir, uint64_t *b)
{
if (!get_next_rand_offset(td, f, ddir, b))
return 0;
}
static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
- enum fio_ddir ddir, unsigned long long *offset)
+ enum fio_ddir ddir, uint64_t *offset)
{
assert(ddir_rw(ddir));
f->last_pos = f->last_pos - f->io_size;
if (f->last_pos < f->real_file_size) {
- unsigned long long pos;
+ uint64_t pos;
if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
f->last_pos = f->real_file_size;
enum fio_ddir ddir, int rw_seq)
{
struct fio_file *f = io_u->file;
- unsigned long long b, offset;
+ uint64_t b, offset;
int ret;
assert(ddir_rw(ddir));
ret = 1;
}
}
-
+
if (!ret) {
if (offset != -1ULL)
io_u->offset = offset;
static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
- struct prof_io_ops *ops = &td->prof_io_ops;
+ if (td->flags & TD_F_PROFILE_OPS) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
- if (ops->fill_io_u_off)
- return ops->fill_io_u_off(td, io_u);
+ if (ops->fill_io_u_off)
+ return ops->fill_io_u_off(td, io_u);
+ }
return __get_next_offset(td, io_u);
}
static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
- struct prof_io_ops *ops = &td->prof_io_ops;
+ if (td->flags & TD_F_PROFILE_OPS) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
- if (ops->fill_io_u_size)
- return ops->fill_io_u_size(td, io_u);
+ if (ops->fill_io_u_size)
+ return ops->fill_io_u_size(td, io_u);
+ }
return __get_next_buflen(td, io_u);
}
static struct fio_file *get_next_file(struct thread_data *td)
{
- struct prof_io_ops *ops = &td->prof_io_ops;
+ if (td->flags & TD_F_PROFILE_OPS) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
- if (ops->get_next_file)
- return ops->get_next_file(td);
+ if (ops->get_next_file)
+ return ops->get_next_file(td);
+ }
return __get_next_file(td);
}
static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
- if (td->o.trim_backlog && td->trim_entries) {
+ if (!(td->flags & TD_F_TRIM_BACKLOG))
+ return 0;
+
+ if (td->trim_entries) {
int get_trim = 0;
if (td->trim_batch) {
static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
- if (td->o.verify_backlog && td->io_hist_len) {
+ if (!(td->flags & TD_F_VER_BACKLOG))
+ return 0;
+
+ if (td->io_hist_len) {
int get_verify = 0;
if (td->verify_batch)
static void small_content_scramble(struct io_u *io_u)
{
unsigned int i, nr_blocks = io_u->buflen / 512;
- unsigned long long boffset;
+ uint64_t boffset;
unsigned int offset;
void *p, *end;
* and the actual offset.
*/
offset = (io_u->start_time.tv_usec ^ boffset) & 511;
- offset &= ~(sizeof(unsigned long long) - 1);
- if (offset >= 512 - sizeof(unsigned long long))
- offset -= sizeof(unsigned long long);
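+ /*
+ * Align the scramble offset down to a uint64_t boundary, backing off
+ * one word if it would land at the very end of the 512 byte sector.
+ */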
+ offset &= ~(sizeof(uint64_t) - 1);
+ if (offset >= 512 - sizeof(uint64_t))
+ offset -= sizeof(uint64_t);
memcpy(p + offset, &boffset, sizeof(boffset));
end = p + 512 - sizeof(io_u->start_time);
/*
* If using an iolog, grab next piece if any available.
*/
- if (td->o.read_iolog_file) {
+ if (td->flags & TD_F_READ_IOLOG) {
if (read_iolog_get(td, io_u))
goto err_put;
} else if (set_io_u_file(td, io_u)) {
f->last_pos = io_u->offset + io_u->buflen;
if (io_u->ddir == DDIR_WRITE) {
- if (td->o.refill_buffers) {
+ if (td->flags & TD_F_REFILL_BUFFERS) {
io_u_fill_buffer(td, io_u,
io_u->xfer_buflen, io_u->xfer_buflen);
- } else if (td->o.scramble_buffers)
+ } else if (td->flags & TD_F_SCRAMBLE_BUFFERS)
do_scramble = 1;
- if (td->o.verify != VERIFY_NONE) {
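+ /* despite the name, TD_F_VER_NONE is set when verification is enabled */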
+ if (td->flags & TD_F_VER_NONE) {
populate_verify_io_u(td, io_u);
do_scramble = 0;
}
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
- unsigned long long secs, remainder, bps, bytes;
+ uint64_t secs, remainder, bps, bytes;
+
bytes = td->this_io_bytes[ddir];
bps = td->rate_bps[ddir];
secs = bytes / bps;