+ if (__get_next_rand_offset(td, f, ddir, b, lastb) == 1)
+ return 1;
+
+ /*
+ * Add our start offset, if any
+ */
+ if (offset)
+ *b += offset;
+
+ return 0;
+}
+
+/*
+ * Comparator for flist_sort(): orders rand_off entries by ascending
+ * block offset. The offsets are uint64_t, so a plain subtraction
+ * truncated to int can report the wrong order once the difference
+ * exceeds INT_MAX (or wraps) -- compare explicitly instead.
+ */
+static int flist_cmp(void *data, struct flist_head *a, struct flist_head *b)
+{
+	struct rand_off *r1 = flist_entry(a, struct rand_off, list);
+	struct rand_off *r2 = flist_entry(b, struct rand_off, list);
+
+	if (r1->off < r2->off)
+		return -1;
+	if (r1->off > r2->off)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Generate the next random block offset in *b, dispatching on the
+ * configured random distribution. Returns 0 on success with *b set,
+ * 1 on failure or if the distribution is unknown.
+ */
+static int get_off_from_method(struct thread_data *td, struct fio_file *f,
+			       enum fio_ddir ddir, uint64_t *b)
+{
+	uint64_t lastb;
+
+	switch (td->o.random_distribution) {
+	case FIO_RAND_DIST_RANDOM:
+		lastb = last_block(td, f, ddir);
+		if (!lastb)
+			return 1;
+		return __get_next_rand_offset(td, f, ddir, b, lastb);
+	case FIO_RAND_DIST_ZIPF:
+		return __get_next_rand_offset_zipf(td, f, ddir, b);
+	case FIO_RAND_DIST_PARETO:
+		return __get_next_rand_offset_pareto(td, f, ddir, b);
+	case FIO_RAND_DIST_GAUSS:
+		return __get_next_rand_offset_gauss(td, f, ddir, b);
+	case FIO_RAND_DIST_ZONED:
+		return __get_next_rand_offset_zoned(td, f, ddir, b);
+	default:
+		log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
+		return 1;
+	}
+}
+
+/*
+ * Whether reads for a verify phase should be generated in sorted
+ * batches of verifysort_nr. Only meaningful for random verify
+ * workloads while actually verifying, and only for random generators
+ * other than tausworthe/tausworthe64.
+ */
+static inline bool should_sort_io(struct thread_data *td)
+{
+	const struct thread_options *o = &td->o;
+	bool verifying_random;
+
+	verifying_random = o->verifysort_nr && o->do_verify &&
+			   td_random(td) && td->runstate == TD_VERIFYING;
+	if (!verifying_random)
+		return false;
+
+	return o->random_generator != FIO_RAND_GEN_TAUSWORTHE &&
+	       o->random_generator != FIO_RAND_GEN_TAUSWORTHE64;
+}
+
+/*
+ * Decide between a random and a sequential offset for mixed
+ * random/sequential workloads. At 100% we always go random without
+ * consuming any randomness; otherwise roll a 1..100 die against the
+ * configured percentage.
+ */
+static bool should_do_random(struct thread_data *td, enum fio_ddir ddir)
+{
+	const unsigned int pct = td->o.perc_rand[ddir];
+
+	if (pct == 100)
+		return true;
+
+	return rand32_between(&td->seq_rand_state[ddir], 1, 100) <= pct;
+}
+
+/*
+ * Produce the next random block offset in *b. When sorted verify
+ * batching applies (see should_sort_io()), offsets are generated
+ * verifysort_nr at a time, sorted ascending, and handed out one by
+ * one from td->next_rand_list. Returns 0 on success with *b set,
+ * non-zero if no offset could be generated.
+ */
+static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
+ enum fio_ddir ddir, uint64_t *b)
+{
+ struct rand_off *r;
+ int i, ret = 1;
+
+ /* Fast path: no sorting requested, generate a single offset */
+ if (!should_sort_io(td))
+ return get_off_from_method(td, f, ddir, b);
+
+ /* Hand out the next entry from a previously sorted batch */
+ if (!flist_empty(&td->next_rand_list)) {
+fetch:
+ r = flist_first_entry(&td->next_rand_list, struct rand_off, list);
+ flist_del(&r->list);
+ *b = r->off;
+ free(r);
+ return 0;
+ }
+
+ /* Batch exhausted: generate up to verifysort_nr fresh offsets */
+ for (i = 0; i < td->o.verifysort_nr; i++) {
+ r = malloc(sizeof(*r));
+
+ ret = get_off_from_method(td, f, ddir, &r->off);
+ if (ret) {
+ free(r);
+ break;
+ }
+
+ flist_add(&r->list, &td->next_rand_list);
+ }
+
+ /* Failed before producing anything at all: propagate the error */
+ if (ret && !i)
+ return ret;
+
+ /* At least one offset generated; sort and hand out the first */
+ assert(!flist_empty(&td->next_rand_list));
+ flist_sort(NULL, &td->next_rand_list, flist_cmp);
+ goto fetch;
+}
+
+/*
+ * Get the next random block for a file. If offset generation fails
+ * (e.g. the random map is exhausted) and the job keeps going past a
+ * full pass -- time based, or nonuniform file service -- reset the
+ * file and try once more. Returns 0 with *b set, 1 on failure.
+ */
+static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
+			       enum fio_ddir ddir, uint64_t *b)
+{
+	bool may_wrap;
+
+	if (get_next_rand_offset(td, f, ddir, b) == 0)
+		return 0;
+
+	may_wrap = td->o.time_based ||
+		   (td->o.file_service_type & __FIO_FSERVICE_NONUNIFORM);
+	if (may_wrap) {
+		fio_file_reset(td, f);
+		if (get_next_rand_offset(td, f, ddir, b) == 0)
+			return 0;
+	}
+
+	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
+	       f->file_name, (unsigned long long) f->last_pos[ddir],
+	       (unsigned long long) f->real_file_size);
+	return 1;
+}
+
+/*
+ * Compute the next sequential offset for @f in direction @ddir,
+ * relative to the file's start offset. Returns 0 with *offset set,
+ * or 1 when the end of the region has been reached.
+ *
+ * Handles time based wraparound, holed IO (ddir_seq_add > 0) and
+ * backwards IO (ddir_seq_add < 0).
+ *
+ * Fix: the time based branch redeclared 'o', shadowing the identical
+ * outer declaration (-Wshadow); the inner copy is removed.
+ */
+static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
+			       enum fio_ddir ddir, uint64_t *offset)
+{
+	struct thread_options *o = &td->o;
+
+	assert(ddir_rw(ddir));
+
+	/*
+	 * Time based jobs wrap around once a full pass over io_size is
+	 * done: rewind last_pos by one block-aligned pass.
+	 */
+	if (f->last_pos[ddir] >= f->io_size + get_start_offset(td, f) &&
+	    o->time_based) {
+		uint64_t io_size = f->io_size + (f->io_size % o->min_bs[ddir]);
+
+		if (io_size > f->last_pos[ddir])
+			f->last_pos[ddir] = 0;
+		else
+			f->last_pos[ddir] = f->last_pos[ddir] - io_size;
+	}
+
+	if (f->last_pos[ddir] < f->real_file_size) {
+		uint64_t pos;
+
+		/*
+		 * Backwards IO starts from the end of the region
+		 * instead of the configured start offset.
+		 */
+		if (f->last_pos[ddir] == f->file_offset && o->ddir_seq_add < 0) {
+			if (f->real_file_size > f->io_size)
+				f->last_pos[ddir] = f->io_size;
+			else
+				f->last_pos[ddir] = f->real_file_size;
+		}
+
+		pos = f->last_pos[ddir] - f->file_offset;
+		if (pos && o->ddir_seq_add) {
+			pos += o->ddir_seq_add;
+
+			/*
+			 * If we reach beyond the end of the file
+			 * with holed IO, wrap around to the
+			 * beginning again. If we're doing backwards IO,
+			 * wrap to the end.
+			 */
+			if (pos >= f->real_file_size) {
+				if (o->ddir_seq_add > 0)
+					pos = f->file_offset;
+				else {
+					if (f->real_file_size > f->io_size)
+						pos = f->io_size;
+					else
+						pos = f->real_file_size;
+
+					pos += o->ddir_seq_add;
+				}
+			}
+		}
+
+		*offset = pos;
+		return 0;
+	}
+
+	return 1;
+}
+
+static int get_next_block(struct thread_data *td, struct io_u *io_u,
+ enum fio_ddir ddir, int rw_seq,
+ unsigned int *is_random)
+{
+ struct fio_file *f = io_u->file;
+ uint64_t b, offset;
+ int ret;
+
+ assert(ddir_rw(ddir));
+
+ b = offset = -1ULL;
+
+ if (rw_seq) {
+ if (td_random(td)) {
+ if (should_do_random(td, ddir)) {
+ ret = get_next_rand_block(td, f, ddir, &b);
+ *is_random = 1;
+ } else {
+ *is_random = 0;
+ io_u_set(td, io_u, IO_U_F_BUSY_OK);
+ ret = get_next_seq_offset(td, f, ddir, &offset);
+ if (ret)
+ ret = get_next_rand_block(td, f, ddir, &b);
+ }
+ } else {
+ *is_random = 0;
+ ret = get_next_seq_offset(td, f, ddir, &offset);
+ }
+ } else {
+ io_u_set(td, io_u, IO_U_F_BUSY_OK);
+ *is_random = 0;
+
+ if (td->o.rw_seq == RW_SEQ_SEQ) {
+ ret = get_next_seq_offset(td, f, ddir, &offset);
+ if (ret) {
+ ret = get_next_rand_block(td, f, ddir, &b);
+ *is_random = 0;
+ }
+ } else if (td->o.rw_seq == RW_SEQ_IDENT) {
+ if (f->last_start[ddir] != -1ULL)
+ offset = f->last_start[ddir] - f->file_offset;
+ else
+ offset = 0;
+ ret = 0;
+ } else {
+ log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
+ ret = 1;
+ }
+ }
+
+ if (!ret) {
+ if (offset != -1ULL)
+ io_u->offset = offset;
+ else if (b != -1ULL)
+ io_u->offset = b * td->o.ba[ddir];
+ else {
+ log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
+ ret = 1;
+ }
+ }
+
+ return ret;