F_ADV_SEQUENTIAL,
};
+/*
+ * Latency "stepped" run state — NOTE(review): presumably stored in the
+ * new thread_data latency_state field added by this patch; IOD_NONE is
+ * the default (no stepping), IOD_STEPPED marks a stepped latency run.
+ * Confirm against the .c files that set/read latency_state.
+ */
+enum {
+	IOD_NONE = 0,
+	IOD_STEPPED,
+};
+
/*
* Per-thread/process specific data. Only used for the network client
* for now.
unsigned int latency_qd;
unsigned int latency_qd_high;
unsigned int latency_qd_low;
+ unsigned int latency_qd_step;
unsigned int latency_failed;
- uint64_t latency_ios;
+ unsigned int latency_state;
+ unsigned int latency_iops[DDIR_RWDIR_CNT];
+ unsigned int latency_step;
+ uint64_t latency_ios[DDIR_RWDIR_CNT];
int latency_end_run;
+ unsigned int nr_lat_stats;
/*
* read/write mixed workload state
enum fio_ddir ddir, uint64_t *bytes_issued, int from_verify,
struct timespec *comp_time);
-/*
- * Latency target helpers
- */
-extern void lat_target_check(struct thread_data *);
-extern void lat_target_init(struct thread_data *);
-extern void lat_target_reset(struct thread_data *);
-
/*
* Iterates all threads/processes within all the defined jobs
*/
return ddir_rw_sum(td->bytes_done) != 0;
}
+int setup_rate(struct thread_data *td);
+
static inline unsigned long long td_max_bs(struct thread_data *td)
{
unsigned long long max_bs;
return (td->flags & TD_F_NEED_LOCK) != 0;
}
+/*
+ * Returns true when overlap serialization applies to offloaded submission:
+ * the job has serialize_overlap set AND io_submit_mode is IO_MODE_OFFLOAD.
+ * Callers use this to decide whether overlap checks must be performed in
+ * the offload submission path rather than inline.
+ */
+static inline bool td_offload_overlap(struct thread_data *td)
+{
+	return td->o.serialize_overlap && td->o.io_submit_mode == IO_MODE_OFFLOAD;
+}
+
/*
* We currently only need to do locking if we have verifier threads
* accessing our internal structures too