return 1;
}
-static int should_do_random(struct thread_data *td)
+static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
unsigned int v;
unsigned long r;
- if (td->o.perc_rand == 100)
+ if (td->o.perc_rand[ddir] == 100)
return 1;
if (td->o.use_os_rand) {
- r = os_random_long(&td->seq_rand_state);
+ r = os_random_long(&td->seq_rand_state[ddir]);
v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
} else {
- r = __rand(&td->__seq_rand_state);
+ r = __rand(&td->__seq_rand_state[ddir]);
v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
}
- return v <= td->o.perc_rand;
+ return v <= td->o.perc_rand[ddir];
}
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
}
static int get_next_block(struct thread_data *td, struct io_u *io_u,
- enum fio_ddir ddir, int rw_seq)
+ enum fio_ddir ddir, int rw_seq,
+ unsigned int *is_random)
{
struct fio_file *f = io_u->file;
uint64_t b, offset;
if (rw_seq) {
if (td_random(td)) {
- if (should_do_random(td))
+ if (should_do_random(td, ddir)) {
ret = get_next_rand_block(td, f, ddir, &b);
- else {
+ *is_random = 1;
+ } else {
+ *is_random = 0;
io_u->flags |= IO_U_F_BUSY_OK;
ret = get_next_seq_offset(td, f, ddir, &offset);
if (ret)
ret = get_next_rand_block(td, f, ddir, &b);
}
- } else
+ } else {
+ *is_random = 0;
ret = get_next_seq_offset(td, f, ddir, &offset);
+ }
} else {
io_u->flags |= IO_U_F_BUSY_OK;
+ *is_random = 0;
if (td->o.rw_seq == RW_SEQ_SEQ) {
ret = get_next_seq_offset(td, f, ddir, &offset);
- if (ret)
+ if (ret) {
ret = get_next_rand_block(td, f, ddir, &b);
+ *is_random = 0;
+ }
} else if (td->o.rw_seq == RW_SEQ_IDENT) {
if (f->last_start != -1ULL)
offset = f->last_start - f->file_offset;
* until we find a free one. For sequential io, just return the end of
* the last io issued.
*/
-static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
+static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
+ unsigned int *is_random)
{
struct fio_file *f = io_u->file;
enum fio_ddir ddir = io_u->ddir;
td->ddir_seq_nr = td->o.ddir_seq_nr;
}
- if (get_next_block(td, io_u, ddir, rw_seq_hit))
+ if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
return 1;
if (io_u->offset >= f->io_size) {
return 0;
}
-static int get_next_offset(struct thread_data *td, struct io_u *io_u)
+static int get_next_offset(struct thread_data *td, struct io_u *io_u,
+ unsigned int *is_random)
{
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ops->fill_io_u_off)
- return ops->fill_io_u_off(td, io_u);
+ return ops->fill_io_u_off(td, io_u, is_random);
}
- return __get_next_offset(td, io_u);
+ return __get_next_offset(td, io_u, is_random);
}
static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
return io_u->offset + buflen <= f->io_size + get_start_offset(td);
}
-static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
+static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
+ unsigned int is_random)
{
- const int ddir = io_u->ddir;
+ int ddir = io_u->ddir;
unsigned int buflen = 0;
unsigned int minbs, maxbs;
unsigned long r, rand_max;
- assert(ddir_rw(ddir));
+ assert(ddir_rw(io_u->ddir));
+
+ if (td->o.bs_is_seq_rand)
+ ddir = is_random ? DDIR_WRITE : DDIR_READ;
+ else
+ ddir = io_u->ddir;
minbs = td->o.min_bs[ddir];
maxbs = td->o.max_bs[ddir];
return buflen;
}
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
+static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
+ unsigned int is_random)
{
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ops->fill_io_u_size)
- return ops->fill_io_u_size(td, io_u);
+ return ops->fill_io_u_size(td, io_u, is_random);
}
- return __get_next_buflen(td, io_u);
+ return __get_next_buflen(td, io_u, is_random);
}
static void set_rwmix_bytes(struct thread_data *td)
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
+ unsigned int is_random = 0;
+
if (td->io_ops->flags & FIO_NOIO)
goto out;
* No log, let the seq/rand engine retrieve the next buflen and
* position.
*/
- if (get_next_offset(td, io_u)) {
+ if (get_next_offset(td, io_u, &is_random)) {
dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
return 1;
}
- io_u->buflen = get_next_buflen(td, io_u);
+ io_u->buflen = get_next_buflen(td, io_u, is_random);
if (!io_u->buflen) {
dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
return 1;
if (!td->o.disable_bw)
add_bw_sample(td, idx, bytes, &icd->time);
- add_iops_sample(td, idx, &icd->time);
+ add_iops_sample(td, idx, bytes, &icd->time);
}
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)