fio_fp64_t S;
};
+/*
+ * Windowed latency histogram bookkeeping for one data direction:
+ * tracks how many samples have been folded into the current window
+ * and when the window was last flushed.
+ */
+struct io_hist {
+ uint64_t samples;
+ /* time (msec granularity, presumably — TODO confirm against callers)
+  * the histogram window was last emitted */
+ unsigned long hist_last;
+ struct flist_head list;
+};
+
+
+/*
+ * Payload of a single log sample: either a plain scalar value, or a
+ * pointer to a histogram bucket snapshot (for IO_LOG_TYPE_HIST logs).
+ */
+union io_sample_data {
+ uint64_t val;
+ struct io_u_plat_entry *plat_entry;
+};
+
+/* Convenience constructors via compound literals */
+#define sample_val(value) ((union io_sample_data) { .val = value })
+#define sample_plat(plat) ((union io_sample_data) { .plat_entry = plat })
+
/*
* A single data sample
*/
struct io_sample {
uint64_t time;
-	uint64_t val;
+	/* scalar value or histogram snapshot pointer, per log type */
+	union io_sample_data data;
uint32_t __ddir;
uint32_t bs;
};
IO_LOG_TYPE_SLAT,
IO_LOG_TYPE_BW,
IO_LOG_TYPE_IOPS,
+ IO_LOG_TYPE_HIST,
};
#define DEF_LOG_ENTRIES 1024
#define MAX_LOG_ENTRIES (1024 * DEF_LOG_ENTRIES)
-#define LOG_QUIESCE_SZ (64 * 1024 * 1024)
-
struct io_logs {
struct flist_head list;
uint64_t nr_samples;
struct flist_head io_logs;
uint32_t cur_log_max;
+ /*
+ * When the current log runs out of space, store events here until
+ * we have a chance to regrow
+ */
+ struct io_logs *pending;
+
unsigned int log_ddir_mask;
char *filename;
unsigned long avg_msec;
unsigned long avg_last;
+ /*
+ * Windowed latency histograms, for keeping track of when we need to
+ * save a copy of the histogram every approximately hist_msec
+ * milliseconds.
+ */
+ struct io_hist hist_window[DDIR_RWDIR_CNT];
+ unsigned long hist_msec;
+ unsigned int hist_coarseness;
+
pthread_mutex_t chunk_lock;
unsigned int chunk_seq;
struct flist_head chunk_list;
return __log_entry_sz(log->log_offset);
}
+/*
+ * Total byte size of the samples held in @cur_log, using the per-entry
+ * size implied by the log's offset setting (see log_entry_sz()).
+ */
+static inline size_t log_sample_sz(struct io_log *log, struct io_logs *cur_log)
+{
+ return cur_log->nr_samples * log_entry_sz(log);
+}
+
static inline struct io_sample *__get_sample(void *samples, int log_offset,
uint64_t sample)
{
struct io_logs *iolog_cur_log(struct io_log *);
uint64_t iolog_nr_samples(struct io_log *);
+void regrow_logs(struct thread_data *);
static inline struct io_sample *get_sample(struct io_log *iolog,
struct io_logs *cur_log,
struct log_params {
struct thread_data *td;
unsigned long avg_msec;
+ unsigned long hist_msec;
+ int hist_coarseness;
int log_type;
int log_offset;
int log_gz;
return log && !log->avg_msec;
}
+/*
+ * True for the latency log types (lat/clat/slat); false for all other
+ * log types (bw/iops/hist/...).
+ */
+static inline bool inline_log(struct io_log *log)
+{
+ return log->log_type == IO_LOG_TYPE_LAT ||
+ log->log_type == IO_LOG_TYPE_CLAT ||
+ log->log_type == IO_LOG_TYPE_SLAT;
+}
+
+/*
+ * Round an io_piece's offset down to @replay_align bytes. An alignment
+ * of 0 means "no alignment requested", so do nothing in that case.
+ * NOTE(review): the mask assumes replay_align is a power of two.
+ */
+static inline void ipo_bytes_align(unsigned int replay_align, struct io_piece *ipo)
+{
+ /*
+  * Guard must be on !replay_align: the original early-returned on a
+  * non-zero alignment (so alignment never ran), and with a zero
+  * alignment fell through to mask with ~(0 - 1) == 0, zeroing offset.
+  */
+ if (!replay_align)
+ return;
+
+ ipo->offset &= ~(replay_align - (uint64_t)1);
+}
+
extern void finalize_logs(struct thread_data *td, bool);
extern void setup_log(struct io_log **, struct log_params *, const char *);
-extern void flush_log(struct io_log *, int);
+extern void flush_log(struct io_log *, bool);
extern void flush_samples(FILE *, void *, uint64_t);
+extern unsigned long hist_sum(int, int, unsigned int *, unsigned int *);
extern void free_log(struct io_log *);
extern void fio_writeout_logs(bool);
extern void td_writeout_logs(struct thread_data *, bool);