r = __rand(&td->__random_state);
}
- dprint(FD_RANDOM, "off rand %llu\n", r);
+ dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);
*b = (lastb - 1) * (r / ((uint64_t) rmax + 1.0));
} else {
if (random_map_free(f, *b))
goto ret;
- dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", *b);
+ dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
+ (unsigned long long) *b);
*b = axmap_next_free(f->io_axmap, *b);
if (*b == (uint64_t) -1ULL)
return 1;
}
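
A note on the scaling above: it maps a raw random value r in [0, rmax] into a
block index in [0, lastb - 1). A standalone sketch, with rand()/RAND_MAX
standing in for fio's RNG and a made-up block count:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const uint64_t lastb = 1000;	/* stand-in for the file's block count */
	const uint64_t rmax = RAND_MAX;	/* fio uses OS_RAND_MAX or FRAND_MAX */

	srand(1);
	for (int i = 0; i < 5; i++) {
		uint64_t r = rand();
		/* same expression as the patch; truncation keeps b < lastb - 1 */
		uint64_t b = (lastb - 1) * (r / ((uint64_t) rmax + 1.0));

		printf("off rand %llu\n", (unsigned long long) b);
	}
	return 0;
}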
+static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
+{
+ unsigned int v;
+ unsigned long r;
+
+ if (td->o.perc_rand[ddir] == 100)
+ return 1;
+
+ if (td->o.use_os_rand) {
+ r = os_random_long(&td->seq_rand_state[ddir]);
+ v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
+ } else {
+ r = __rand(&td->__seq_rand_state[ddir]);
+ v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
+ }
+
+ return v <= td->o.perc_rand[ddir];
+}
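
The v <= perc_rand test above implements the random/sequential split: v is
uniform in 1..100, so the random path is taken roughly perc_rand percent of
the time. A minimal check of that mapping (rand()/RAND_MAX replaces fio's
RNG and OS_RAND_MAX/FRAND_MAX, purely for illustration):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned int perc = 30;	/* stand-in for td->o.perc_rand[ddir] */
	unsigned long hits = 0, trials = 1000000;

	srand(1);
	for (unsigned long i = 0; i < trials; i++) {
		unsigned int v = 1 + (int) (100.0 * (rand() / (RAND_MAX + 1.0)));

		if (v <= perc)
			hits++;		/* would go down the random path */
	}
	printf("random: %.2f%% (expected ~%u%%)\n", 100.0 * hits / trials, perc);
	return 0;
}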
+
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, uint64_t *b)
{
}
dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
- f->file_name, f->last_pos, f->real_file_size);
+ f->file_name, (unsigned long long) f->last_pos,
+ (unsigned long long) f->real_file_size);
return 1;
}
}
static int get_next_block(struct thread_data *td, struct io_u *io_u,
- enum fio_ddir ddir, int rw_seq)
+ enum fio_ddir ddir, int rw_seq,
+ unsigned int *is_random)
{
struct fio_file *f = io_u->file;
uint64_t b, offset;
b = offset = -1ULL;
if (rw_seq) {
- if (td_random(td))
- ret = get_next_rand_block(td, f, ddir, &b);
- else
+ if (td_random(td)) {
+ if (should_do_random(td, ddir)) {
+ ret = get_next_rand_block(td, f, ddir, &b);
+ *is_random = 1;
+ } else {
+ *is_random = 0;
+ io_u->flags |= IO_U_F_BUSY_OK;
+ ret = get_next_seq_offset(td, f, ddir, &offset);
+ if (ret)
+ ret = get_next_rand_block(td, f, ddir, &b);
+ }
+ } else {
+ *is_random = 0;
ret = get_next_seq_offset(td, f, ddir, &offset);
+ }
} else {
io_u->flags |= IO_U_F_BUSY_OK;
+ *is_random = 0;
if (td->o.rw_seq == RW_SEQ_SEQ) {
ret = get_next_seq_offset(td, f, ddir, &offset);
- if (ret)
+ if (ret) {
ret = get_next_rand_block(td, f, ddir, &b);
+ *is_random = 0;
+ }
} else if (td->o.rw_seq == RW_SEQ_IDENT) {
if (f->last_start != -1ULL)
offset = f->last_start - f->file_offset;
else if (b != -1ULL)
io_u->offset = b * td->o.ba[ddir];
else {
- log_err("fio: bug in offset generation: offset=%llu, b=%llu\n",
- offset, b);
+ log_err("fio: bug in offset generation: offset=%llu, b=%llu\n", (unsigned long long) offset, (unsigned long long) b);
ret = 1;
}
}
* until we find a free one. For sequential io, just return the end of
* the last io issued.
*/
-static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
+static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
+ unsigned int *is_random)
{
struct fio_file *f = io_u->file;
enum fio_ddir ddir = io_u->ddir;
td->ddir_seq_nr = td->o.ddir_seq_nr;
}
- if (get_next_block(td, io_u, ddir, rw_seq_hit))
+ if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
return 1;
if (io_u->offset >= f->io_size) {
dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
- io_u->offset, f->io_size);
+ (unsigned long long) io_u->offset,
+ (unsigned long long) f->io_size);
return 1;
}
io_u->offset += f->file_offset;
if (io_u->offset >= f->real_file_size) {
dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
- io_u->offset, f->real_file_size);
+ (unsigned long long) io_u->offset,
+ (unsigned long long) f->real_file_size);
return 1;
}
return 0;
}
-static int get_next_offset(struct thread_data *td, struct io_u *io_u)
+static int get_next_offset(struct thread_data *td, struct io_u *io_u,
+ unsigned int *is_random)
{
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ops->fill_io_u_off)
- return ops->fill_io_u_off(td, io_u);
+ return ops->fill_io_u_off(td, io_u, is_random);
}
- return __get_next_offset(td, io_u);
+ return __get_next_offset(td, io_u, is_random);
}
static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
return io_u->offset + buflen <= f->io_size + get_start_offset(td);
}
-static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
+static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u,
+ unsigned int is_random)
{
- const int ddir = io_u->ddir;
+ int ddir = io_u->ddir;
unsigned int buflen = 0;
unsigned int minbs, maxbs;
unsigned long r, rand_max;
- assert(ddir_rw(ddir));
+ assert(ddir_rw(io_u->ddir));
+
+	if (td->o.bs_is_seq_rand)
+		ddir = is_random ? DDIR_WRITE : DDIR_READ;
minbs = td->o.min_bs[ddir];
maxbs = td->o.max_bs[ddir];
}
}
+ if (td->o.do_verify && td->o.verify != VERIFY_NONE)
+ buflen = (buflen + td->o.verify_interval - 1) &
+ ~(td->o.verify_interval - 1);
+
if (!td->o.bs_unaligned && is_power_of_2(minbs))
buflen = (buflen + minbs - 1) & ~(minbs - 1);
return buflen;
}
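
Both the new verify_interval clamp and the existing minbs alignment use the
same power-of-two round-up idiom; a self-contained illustration (the helper
name is invented for this sketch):

#include <assert.h>
#include <stdio.h>

/* Round len up to the next multiple of align. align must be a power of
 * two, which is why the minbs case is guarded by is_power_of_2(). */
static unsigned int round_up_pow2(unsigned int len, unsigned int align)
{
	assert(align && !(align & (align - 1)));
	return (len + align - 1) & ~(align - 1);
}

int main(void)
{
	printf("%u\n", round_up_pow2(5000, 4096));	/* prints 8192 */
	printf("%u\n", round_up_pow2(8192, 4096));	/* prints 8192 */
	return 0;
}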
-static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
+static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u,
+ unsigned int is_random)
{
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ops->fill_io_u_size)
- return ops->fill_io_u_size(td, io_u);
+ return ops->fill_io_u_size(td, io_u, is_random);
}
- return __get_next_buflen(td, io_u);
+ return __get_next_buflen(td, io_u, is_random);
}
static void set_rwmix_bytes(struct thread_data *td)
return DDIR_WRITE;
}
+void io_u_quiesce(struct thread_data *td)
+{
+	/*
+	 * We are going to sleep; ensure that we flush anything pending so
+	 * as not to skew our latency numbers.
+	 *
+	 * Changed to only monitor 'in flight' requests here instead of
+	 * td->cur_depth, because td->cur_depth does not accurately represent
+	 * IOs that have actually been submitted to an async engine, and
+	 * cur_depth is meaningless for sync engines.
+	 */
+ while (td->io_u_in_flight) {
+ int fio_unused ret;
+
+ ret = io_u_queued_complete(td, 1, NULL);
+ }
+}
+
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
enum fio_ddir odir = ddir ^ 1;
} else
usec = td->rate_pending_usleep[ddir];
- /*
- * We are going to sleep, ensure that we flush anything pending as
- * not to skew our latency numbers.
- *
- * Changed to only monitor 'in flight' requests here instead of the
- * td->cur_depth, b/c td->cur_depth does not accurately represent
- * io's that have been actually submitted to an async engine,
- * and cur_depth is meaningless for sync engines.
- */
- if (td->io_u_in_flight) {
- int fio_unused ret;
-
- ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
- }
+ io_u_quiesce(td);
fio_gettime(&t, NULL);
usec_sleep(td, usec);
if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
td->cur_depth--;
- flist_del_init(&io_u->list);
- flist_add(&io_u->list, &td->io_u_freelist);
+ io_u_qpush(&td->io_u_freelist, io_u);
td_io_u_unlock(td);
td_io_u_free_notify(td);
}
__io_u->flags &= ~IO_U_F_FLIGHT;
if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
td->cur_depth--;
- flist_del(&__io_u->list);
- flist_add_tail(&__io_u->list, &td->io_u_requeues);
+
+ io_u_rpush(&td->io_u_requeues, __io_u);
td_io_u_unlock(td);
*io_u = NULL;
}
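
io_u_qpush()/io_u_qpop() and the requeue variants are defined outside this
patch (io_u_queue.h in the fio tree). A minimal array-backed LIFO along
these lines would satisfy the calls; treat the exact layout as an assumption
of this sketch:

struct io_u;	/* opaque here; the real definition lives in fio's headers */

/* A fixed-capacity array of io_u pointers plus a count: O(1) push/pop with
 * no per-io_u list linkage, which is the point of dropping the flists. */
struct io_u_queue {
	struct io_u **io_us;
	unsigned int nr;
};

static inline int io_u_qempty(const struct io_u_queue *q)
{
	return !q->nr;
}

static inline void io_u_qpush(struct io_u_queue *q, struct io_u *io_u)
{
	q->io_us[q->nr++] = io_u;	/* caller guarantees capacity */
}

static inline struct io_u *io_u_qpop(struct io_u_queue *q)
{
	return q->nr ? q->io_us[--q->nr] : NULL;
}

The requeue helpers (io_u_rpush()/io_u_rpop()/io_u_rempty()) would follow
the same idea, presumably ring-based so requeued io_us keep their order.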
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
+ unsigned int is_random;
+
if (td->io_ops->flags & FIO_NOIO)
goto out;
* No log, let the seq/rand engine retrieve the next buflen and
* position.
*/
- if (get_next_offset(td, io_u)) {
+ if (get_next_offset(td, io_u, &is_random)) {
dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
return 1;
}
- io_u->buflen = get_next_buflen(td, io_u);
+ io_u->buflen = get_next_buflen(td, io_u, is_random);
if (!io_u->buflen) {
dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
return 1;
if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
dprint(FD_IO, "io_u %p, offset too large\n", io_u);
- dprint(FD_IO, " off=%llu/%lu > %llu\n", io_u->offset,
- io_u->buflen, io_u->file->real_file_size);
+ dprint(FD_IO, " off=%llu/%lu > %llu\n",
+ (unsigned long long) io_u->offset, io_u->buflen,
+ (unsigned long long) io_u->file->real_file_size);
return 1;
}
struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;
td_io_u_lock(td);
again:
- if (!flist_empty(&td->io_u_requeues))
- io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
- else if (!queue_full(td)) {
- io_u = flist_entry(td->io_u_freelist.next, struct io_u, list);
+ if (!io_u_rempty(&td->io_u_requeues))
+ io_u = io_u_rpop(&td->io_u_requeues);
+ else if (!io_u_qempty(&td->io_u_freelist)) {
+ io_u = io_u_qpop(&td->io_u_freelist);
io_u->buflen = 0;
io_u->resid = 0;
io_u->error = 0;
io_u->acct_ddir = -1;
- flist_del(&io_u->list);
- flist_add_tail(&io_u->list, &td->io_u_busylist);
td->cur_depth++;
io_u->flags |= IO_U_F_IN_CUR_DEPTH;
} else if (td->o.verify_async) {
tusec = utime_since(&io_u->start_time, &icd->time);
add_lat_sample(td, idx, tusec, bytes);
+ if (td->flags & TD_F_PROFILE_OPS) {
+ struct prof_io_ops *ops = &td->prof_io_ops;
+
+ if (ops->io_u_lat)
+ icd->error = ops->io_u_lat(td, tusec);
+ }
+
if (td->o.max_latency && tusec > td->o.max_latency) {
if (!td->error)
log_err("fio: latency of %lu usec exceeds specified max (%u usec)\n", tusec, td->o.max_latency);
if (!td->o.disable_bw)
add_bw_sample(td, idx, bytes, &icd->time);
- add_iops_sample(td, idx, &icd->time);
+ add_iops_sample(td, idx, bytes, &icd->time);
+
+ if (td->o.number_ios && !--td->o.number_ios)
+ td->done = 1;
}
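
The number_ios countdown relies on short-circuit evaluation: a value of 0
disables the option entirely, otherwise each completion decrements it and
the job is flagged done when it reaches zero. A toy trace of that test:

#include <stdio.h>

int main(void)
{
	unsigned long long number_ios = 3;	/* stand-in for td->o.number_ios */
	int done = 0;

	for (int completed = 1; completed <= 5 && !done; completed++) {
		/* same expression as the patch */
		if (number_ios && !--number_ios)
			done = 1;
		printf("completion %d: done=%d\n", completed, done);
	}
	return 0;
}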
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
}
}
-/*
- * "randomly" fill the buffer contents
- */
-void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
- unsigned int min_write, unsigned int max_bs)
+void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
+ unsigned int max_bs)
{
- io_u->buf_filled_len = 0;
-
if (!td->o.zero_buffers) {
unsigned int perc = td->o.compress_percentage;
unsigned int seg = min_write;
seg = min(min_write, td->o.compress_chunk);
- fill_random_buf_percentage(&td->buf_state, io_u->buf,
+ if (!seg)
+ seg = min_write;
+
+ fill_random_buf_percentage(&td->buf_state, buf,
perc, seg, max_bs);
} else
- fill_random_buf(&td->buf_state, io_u->buf, max_bs);
+ fill_random_buf(&td->buf_state, buf, max_bs);
} else
- memset(io_u->buf, 0, max_bs);
+ memset(buf, 0, max_bs);
+}
+
+/*
+ * "randomly" fill the buffer contents
+ */
+void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
+ unsigned int min_write, unsigned int max_bs)
+{
+ io_u->buf_filled_len = 0;
+ fill_io_buffer(td, io_u->buf, min_write, max_bs);
}
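
Splitting fill_io_buffer() out of io_u_fill_buffer() lets code that holds a
raw buffer, with no io_u attached, reuse the zero/compressible/random fill
logic. A hypothetical caller (prime_scratch() and SCRATCH_SZ are invented
for this example):

#define SCRATCH_SZ	4096

static void prime_scratch(struct thread_data *td)
{
	static char scratch[SCRATCH_SZ];

	/* honors zero_buffers and compress_percentage exactly as the
	 * io_u path does, since both now share fill_io_buffer() */
	fill_io_buffer(td, scratch, SCRATCH_SZ, SCRATCH_SZ);
}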