static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, uint64_t *offset)
{
+ struct thread_options *o = &td->o;
+
assert(ddir_rw(ddir));
- if (f->last_pos >= f->io_size + get_start_offset(td, f) && td->o.time_based)
+ if (f->last_pos >= f->io_size + get_start_offset(td, f) &&
+ o->time_based)
f->last_pos = f->last_pos - f->io_size;
if (f->last_pos < f->real_file_size) {
uint64_t pos;
- if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
+ if (f->last_pos == f->file_offset && o->ddir_seq_add < 0)
f->last_pos = f->real_file_size;
pos = f->last_pos - f->file_offset;
- if (pos)
- pos += td->o.ddir_seq_add;
+ if (pos && o->ddir_seq_add) {
+ pos += o->ddir_seq_add;
+
+ /*
+ * If we reach beyond the end of the file
+ * with holed IO, wrap around to the
+ * beginning again.
+ */
+ if (pos >= f->real_file_size)
+ pos = f->file_offset;
+ }
*offset = pos;
return 0;
* See if it's time to switch to a new zone
*/
if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
+ struct fio_file *f = io_u->file;
+
td->zone_bytes = 0;
- io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
- io_u->file->last_pos = io_u->file->file_offset;
+ f->file_offset += td->o.zone_range + td->o.zone_skip;
+
+ /*
+ * Wrap from the beginning, if we exceed the file size
+ */
+ if (f->file_offset >= f->real_file_size)
+ f->file_offset = f->real_file_size - f->file_offset;
+ f->last_pos = f->file_offset;
td->io_skip_bytes += td->o.zone_skip;
}
* If latency target is enabled, we might be ramping up or down and not
* using the full queue depth available.
*/
-int queue_full(struct thread_data *td)
+int queue_full(const struct thread_data *td)
{
const int qempty = io_u_qempty(&td->io_u_freelist);
if (io_u->ddir == DDIR_WRITE) {
if (td->flags & TD_F_REFILL_BUFFERS) {
io_u_fill_buffer(td, io_u,
- io_u->xfer_buflen, io_u->xfer_buflen);
+ td->o.min_bs[DDIR_WRITE],
+ io_u->xfer_buflen);
} else if ((td->flags & TD_F_SCRAMBLE_BUFFERS) &&
!(td->flags & TD_F_COMPRESS))
do_scramble = 1;
void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
- const char *msg[] = { "read", "write", "sync", "datasync",
- "sync_file_range", "wait", "trim" };
if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
return;
- log_err("fio: io_u error");
-
- if (io_u->file)
- log_err(" on file %s", io_u->file->file_name);
-
- log_err(": %s\n", strerror(io_u->error));
-
- log_err(" %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
- io_u->offset, io_u->xfer_buflen);
+ log_err("fio: io_u error%s%s: %s: %s offset=%llu, buflen=%lu\n",
+ io_u->file ? " on file " : "",
+ io_u->file ? io_u->file->file_name : "",
+ strerror(io_u->error),
+ io_ddir_name(io_u->ddir),
+ io_u->offset, io_u->xfer_buflen);
if (!td->error)
td_verror(td, io_u->error, "io_u error");
if (!min_evts)
tvp = &ts;
+ else if (min_evts > td->cur_depth)
+ min_evts = td->cur_depth;
ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
if (ret < 0) {
}
}
+/*
+ * See if we should reuse the last seed, if dedupe is enabled
+ *
+ * Picks which random state the next buffer fill should draw from.
+ * Returning &td->buf_state_prev replays the previously-saved state,
+ * which reproduces the previous buffer contents (a dedupe-able block);
+ * returning &td->buf_state advances the live state and produces fresh
+ * data.
+ */
+static struct frand_state *get_buf_state(struct thread_data *td)
+{
+ unsigned int v;
+ unsigned long r;
+
+ /* Dedupe disabled: always fill from the live state */
+ if (!td->o.dedupe_percentage)
+ return &td->buf_state;
+ else if (td->o.dedupe_percentage == 100)
+ /* 100%: every buffer replays the saved state */
+ return &td->buf_state_prev;
+
+ /* Roll a value in [1, 100] from the dedicated dedupe RNG */
+ r = __rand(&td->dedupe_state);
+ v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
+
+ /* Replay the previous state dedupe_percentage% of the time */
+ if (v <= td->o.dedupe_percentage)
+ return &td->buf_state_prev;
+
+ return &td->buf_state;
+}
+
+/*
+ * Remember the random state we just filled from, so a later dedupe
+ * roll can replay it. Only saves when the live state was used; a
+ * replay of the previous state leaves the saved copy untouched, so
+ * consecutive dedupe hits reproduce the same contents.
+ */
+static void save_buf_state(struct thread_data *td, struct frand_state *rs)
+{
+ if (rs == &td->buf_state)
+ frand_copy(&td->buf_state_prev, rs);
+}
+
/*
 * Fill an IO buffer according to the job's buffer options: a fixed
 * pattern, zeroes, or (de-dupe / compression aware) random data.
 *
 * NOTE(review): this is a patch fragment — the "if (...)" guard that
 * selects the pattern-fill branch before fill_buffer_pattern() is
 * missing from the visible hunk; confirm against the full file.
 */
void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
 unsigned int max_bs)
{
 fill_buffer_pattern(td, buf, max_bs);
 else if (!td->o.zero_buffers) {
 unsigned int perc = td->o.compress_percentage;
+ struct frand_state *rs;
+ unsigned int left = max_bs;
+
+ /*
+ * Fill the buffer in min_write sized chunks, choosing the
+ * RNG state per chunk so dedupe can replay a previous
+ * chunk's contents; save the state after each chunk.
+ */
+ do {
+ rs = get_buf_state(td);
+
+ /* Last chunk may be shorter than min_write */
+ min_write = min(min_write, left);
+
+ if (perc) {
+ unsigned int seg = min_write;
- if (perc) {
- unsigned int seg = min_write;
+ /* Compress in compress_chunk sized segments, if set */
+ seg = min(min_write, td->o.compress_chunk);
+ if (!seg)
+ seg = min_write;
- seg = min(min_write, td->o.compress_chunk);
- if (!seg)
- seg = min_write;
+ fill_random_buf_percentage(rs, buf, perc, seg,
+ min_write);
+ } else
+ fill_random_buf(rs, buf, min_write);
- fill_random_buf_percentage(&td->buf_state, buf,
- perc, seg, max_bs);
- } else
- fill_random_buf(&td->buf_state, buf, max_bs);
+ buf += min_write;
+ left -= min_write;
+ save_buf_state(td, rs);
+ } while (left);
 } else
 memset(buf, 0, max_bs);
}