4 #include "lib/rbtree.h"
5 #include "lib/ieee754.h"
10 * Use for maintaining statistics
23 unsigned long hist_last;
24 struct flist_head list;
28 union io_sample_data {
30 struct io_u_plat_entry *plat_entry;
33 #define sample_val(value) ((union io_sample_data) { .val = value })
34 #define sample_plat(plat) ((union io_sample_data) { .plat_entry = plat })
37 * A single data sample
41 union io_sample_data data;
46 struct io_sample_offset {
60 #define DEF_LOG_ENTRIES 1024
61 #define MAX_LOG_ENTRIES (1024 * DEF_LOG_ENTRIES)
64 struct flist_head list;
71 * Dynamically growing data sample log
75 * Entries already logged
77 struct flist_head io_logs;
81 * When the current log runs out of space, store events here until
82 * we have a chance to regrow
84 struct io_logs *pending;
86 unsigned int log_ddir_mask;
90 struct thread_data *td;
92 unsigned int log_type;
95 * If we fail extending the log, stop collecting more entries.
102 unsigned int log_offset;
105 * Max size of log entries before a chunk is compressed
110 * Don't deflate for storing, just store the compressed bits
112 unsigned int log_gz_store;
115 * Windowed average, for logging single entries average over some
118 struct io_stat avg_window[DDIR_RWDIR_CNT];
119 unsigned long avg_msec;
120 unsigned long avg_last;
123 * Windowed latency histograms, for keeping track of when we need to
124 * save a copy of the histogram every approximately hist_msec
127 struct io_hist hist_window[DDIR_RWDIR_CNT];
128 unsigned long hist_msec;
129 unsigned int hist_coarseness;
131 pthread_mutex_t chunk_lock;
132 unsigned int chunk_seq;
133 struct flist_head chunk_list;
137 * If the upper bit is set, then we have the offset as well
139 #define LOG_OFFSET_SAMPLE_BIT 0x80000000U
140 #define io_sample_ddir(io) ((io)->__ddir & ~LOG_OFFSET_SAMPLE_BIT)
142 static inline void io_sample_set_ddir(struct io_log *log,
143 struct io_sample *io,
146 io->__ddir = ddir | log->log_ddir_mask;
149 static inline size_t __log_entry_sz(int log_offset)
152 return sizeof(struct io_sample_offset);
154 return sizeof(struct io_sample);
157 static inline size_t log_entry_sz(struct io_log *log)
159 return __log_entry_sz(log->log_offset);
162 static inline size_t log_sample_sz(struct io_log *log, struct io_logs *cur_log)
164 return cur_log->nr_samples * log_entry_sz(log);
/*
 * Return a pointer to entry number 'sample' inside a raw sample buffer,
 * scaling by the per-entry size implied by log_offset. The final
 * parameter was lost in extraction and is restored as uint64_t sample,
 * matching its use in the byte-offset computation.
 */
static inline struct io_sample *__get_sample(void *samples, int log_offset,
					     uint64_t sample)
{
	uint64_t sample_offset = sample * __log_entry_sz(log_offset);

	return (struct io_sample *) ((char *) samples + sample_offset);
}
174 struct io_logs *iolog_cur_log(struct io_log *);
175 uint64_t iolog_nr_samples(struct io_log *);
176 void regrow_logs(struct thread_data *);
178 static inline struct io_sample *get_sample(struct io_log *iolog,
179 struct io_logs *cur_log,
182 return __get_sample(cur_log->log, iolog->log_offset, sample);
193 * When logging io actions, this matches a single sent io_u
197 struct rb_node rb_node;
198 struct flist_head list;
200 struct flist_head trim_list;
203 struct fio_file *file;
205 unsigned long long offset;
206 unsigned short numberio;
212 unsigned int file_action;
227 extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
228 extern void log_io_u(const struct thread_data *, const struct io_u *);
229 extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
230 extern int __must_check init_iolog(struct thread_data *td);
231 extern void log_io_piece(struct thread_data *, struct io_u *);
232 extern void unlog_io_piece(struct thread_data *, struct io_u *);
233 extern void trim_io_piece(struct thread_data *, const struct io_u *);
234 extern void queue_io_piece(struct thread_data *, struct io_piece *);
235 extern void prune_io_piece_log(struct thread_data *);
236 extern void write_iolog_close(struct thread_data *);
237 extern int iolog_compress_init(struct thread_data *, struct sk_out *);
238 extern void iolog_compress_exit(struct thread_data *);
239 extern size_t log_chunk_sizes(struct io_log *);
242 extern int iolog_file_inflate(const char *);
249 struct thread_data *td;
250 unsigned long avg_msec;
251 unsigned long hist_msec;
260 static inline bool per_unit_log(struct io_log *log)
262 return log && !log->avg_msec;
265 static inline bool inline_log(struct io_log *log)
267 return log->log_type == IO_LOG_TYPE_LAT ||
268 log->log_type == IO_LOG_TYPE_CLAT ||
269 log->log_type == IO_LOG_TYPE_SLAT;
272 static inline void ipo_bytes_align(unsigned int replay_align, struct io_piece *ipo)
277 ipo->offset &= ~(replay_align - (uint64_t)1);
280 extern void finalize_logs(struct thread_data *td, bool);
281 extern void setup_log(struct io_log **, struct log_params *, const char *);
282 extern void flush_log(struct io_log *, bool);
283 extern void flush_samples(FILE *, void *, uint64_t);
284 extern unsigned long hist_sum(int, int, unsigned int *, unsigned int *);
285 extern void free_log(struct io_log *);
286 extern void fio_writeout_logs(bool);
287 extern void td_writeout_logs(struct thread_data *, bool);
288 extern int iolog_cur_flush(struct io_log *, struct io_logs *);
290 static inline void init_ipo(struct io_piece *ipo)
292 memset(ipo, 0, sizeof(*ipo));
293 INIT_FLIST_HEAD(&ipo->trim_list);
296 struct iolog_compress {
297 struct flist_head list;