unsigned int fsync_blocks;
unsigned int start_delay;
unsigned long long timeout;
+ unsigned long long ramp_time;
unsigned int overwrite;
unsigned int bw_avg_time;
unsigned int loops;
unsigned int zero_buffers;
unsigned int refill_buffers;
unsigned int time_based;
+ unsigned int disable_clat;
+ unsigned int disable_slat;
+ unsigned int disable_bw;
char *read_iolog_file;
char *write_iolog_file;
struct timeval rw_end[2];
struct timeval last_issue;
unsigned int rw_end_set[2];
+ unsigned int ramp_time_over;
/*
* read/write mixed workload state
*/
struct flist_head io_log_list;
- /*
- * timeout handling
- */
- struct timeval timeout_end;
- struct itimerval timer;
-
/*
* for fileservice, how often to switch to a new file
*/
FIO_ETA_NEVER,
};
-/*
- * 30 second per-io_u timeout, with 5 second intervals to avoid resetting
- * the timer on each queue operation.
- */
-#define IO_U_TIMEOUT_INC 5
-#define IO_U_TIMEOUT 30
-
#define __td_verror(td, err, msg, func) \
do { \
if ((td)->error) \
*/
struct disk_util {
struct flist_head list;
+ /* If this disk is a slave, hook it into the master's
+ * list using this head.
+ */
+ struct flist_head slavelist;
char *name;
char *sysfs_root;
struct disk_util_stat dus;
struct disk_util_stat last_dus;
+ /* For software RAIDs, this entry maintains pointers to the
+ * entries for the slave devices. The disk_util entries for
+ * the slave devices should primarily be maintained through
+ * the disk_list list, i.e. for memory allocation and
+ * de-allocation, etc. Whereas this list should be used only
+ * for aggregating a software RAID's disk util figures.
+ */
+ struct flist_head slaves;
+
unsigned long msec;
struct timeval time;
};
extern void fill_start_time(struct timeval *);
extern void fio_gettime(struct timeval *, void *);
extern void set_genesis_time(void);
+extern int ramp_time_over(struct thread_data *);
+extern int in_ramp_time(struct thread_data *);
/*
* Init/option functions
TD_NOT_CREATED = 0,
TD_CREATED,
TD_INITIALIZED,
+ TD_RAMP,
TD_RUNNING,
TD_VERIFYING,
TD_FSYNCING,
TD_REAPED,
};
+extern void td_set_runstate(struct thread_data *, int);
+
/*
* Verify helpers
*/
extern long __must_check io_u_queued_complete(struct thread_data *, int);
extern void io_u_queued(struct thread_data *, struct io_u *);
extern void io_u_log_error(struct thread_data *, struct io_u *);
-extern void io_u_init_timeout(void);
-extern void io_u_set_timeout(struct thread_data *);
extern void io_u_mark_depth(struct thread_data *, unsigned int);
extern void io_u_fill_buffer(struct thread_data *td, struct io_u *, unsigned int);
void io_u_mark_complete(struct thread_data *, unsigned int);
void io_u_mark_submit(struct thread_data *, unsigned int);
+/*
+ * Reset stats after ramp time completes
+ */
+extern void reset_all_stats(struct thread_data *);
+
/*
* io engine entry points
*/
#define dprint_io_u(io_u, p)
#endif
+static inline int fio_fill_issue_time(struct thread_data *td)
+{
+ if (td->o.read_iolog_file ||
+ !td->o.disable_clat || !td->o.disable_slat || !td->o.disable_bw)
+ return 1;
+
+ return 0;
+}
+
#endif