static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
enum fio_ddir ddir, uint64_t *offset)
{
+ struct thread_options *o = &td->o;
+
assert(ddir_rw(ddir));
- if (f->last_pos >= f->io_size + get_start_offset(td, f) && td->o.time_based)
+ if (f->last_pos >= f->io_size + get_start_offset(td, f) &&
+ o->time_based)
f->last_pos = f->last_pos - f->io_size;
if (f->last_pos < f->real_file_size) {
uint64_t pos;
- if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
+ if (f->last_pos == f->file_offset && o->ddir_seq_add < 0)
f->last_pos = f->real_file_size;
pos = f->last_pos - f->file_offset;
- if (pos)
- pos += td->o.ddir_seq_add;
+ if (pos && o->ddir_seq_add) {
+ pos += o->ddir_seq_add;
+
+ /*
+ * If we reach beyond the end of the file
+ * with holed IO, wrap around to the
+ * beginning again.
+ */
+ if (pos >= f->real_file_size)
+ pos = f->file_offset;
+ }
*offset = pos;
return 0;
* See if it's time to switch to a new zone
*/
if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
+ struct fio_file *f = io_u->file;
+
td->zone_bytes = 0;
- io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
- io_u->file->last_pos = io_u->file->file_offset;
+ f->file_offset += td->o.zone_range + td->o.zone_skip;
+
+ /*
+ * Wrap from the beginning if we exceed the file size.
+ *
+ * NOTE(review): when file_offset > real_file_size, the uint64_t
+ * subtraction below (real_file_size - file_offset) underflows to a
+ * huge value instead of wrapping into range — presumably the intent
+ * was file_offset - real_file_size (modulo-style wrap); verify.
+ */
+ if (f->file_offset >= f->real_file_size)
+ f->file_offset = f->real_file_size - f->file_offset;
+ f->last_pos = f->file_offset;
td->io_skip_bytes += td->o.zone_skip;
}
* If latency target is enabled, we might be ramping up or down and not
* using the full queue depth available.
*/
-int queue_full(struct thread_data *td)
+int queue_full(const struct thread_data *td)
{
const int qempty = io_u_qempty(&td->io_u_freelist);