unsigned int latency_qd_low;
unsigned int latency_failed;
uint64_t latency_ios;
+ int latency_end_run;
/*
* read/write mixed workload state
extern int shm_id;
extern int groupid;
extern int output_format;
+extern int append_terse_output;
extern int temp_stall_ts;
extern uintptr_t page_mask, page_size;
extern int read_only;
};
extern void td_set_runstate(struct thread_data *, int);
+extern int td_bump_runstate(struct thread_data *, int);
+extern void td_restore_runstate(struct thread_data *, int);
+
#define TERMINATE_ALL (-1)
extern void fio_terminate_threads(int);
*/
/*
 * Latency-target workload control (prototypes only; definitions live in a .c
 * file not visible here). NOTE(review): presumably these manage the queue-depth
 * probing state hinted at by latency_qd_low/latency_failed above — confirm
 * against the implementation.
 */
extern void lat_target_check(struct thread_data *);
extern void lat_target_init(struct thread_data *);
+extern void lat_target_reset(struct thread_data *);
/*
 * Iterate over every thread_data slot: binds (td) to &threads[i] for
 * i in [0, thread_number). Both loop variables are caller-supplied so the
 * macro evaluates its arguments multiple times — pass plain lvalues only.
 */
#define for_each_td(td, i) \
	for ((i) = 0, (td) = &threads[0]; (i) < (int) thread_number; (i)++, (td)++)
FIO_RAND_GEN_LFSR,
};
+/*
+ * CPU mask sharing policy. NOTE(review): names suggest SHARED = one mask
+ * shared by all jobs, SPLIT = mask divided among jobs — confirm at use site,
+ * as no consumer of these constants is visible in this fragment.
+ */
+enum {
+	FIO_CPUS_SHARED = 0,
+	FIO_CPUS_SPLIT,
+};
+
#endif