6 #include "lib/rbtree.h"
7 #include "lib/ieee754.h"
12 * Use for maintaining statistics
25 unsigned long hist_last;
26 struct flist_head list;
/*
 * Payload of a single sample: either a plain 64-bit value, or (for
 * latency-histogram logs) a pointer to a histogram bucket entry.
 * NOTE(review): the `val` member was reconstructed from the
 * sample_val() macro below — confirm against the upstream header.
 */
union io_sample_data {
	uint64_t val;
	struct io_u_plat_entry *plat_entry;
};
35 #define sample_val(value) ((union io_sample_data) { .val = value })
36 #define sample_plat(plat) ((union io_sample_data) { .plat_entry = plat })
39 * A single data sample
43 union io_sample_data data;
48 struct io_sample_offset {
62 #define DEF_LOG_ENTRIES 1024
63 #define MAX_LOG_ENTRIES (1024 * DEF_LOG_ENTRIES)
66 struct flist_head list;
73 * Dynamically growing data sample log
77 * Entries already logged
79 struct flist_head io_logs;
83 * When the current log runs out of space, store events here until
84 * we have a chance to regrow
86 struct io_logs *pending;
88 unsigned int log_ddir_mask;
92 struct thread_data *td;
94 unsigned int log_type;
97 * If we fail extending the log, stop collecting more entries.
104 unsigned int log_offset;
107 * Max size of log entries before a chunk is compressed
112 * Don't deflate for storing, just store the compressed bits
114 unsigned int log_gz_store;
117 * Windowed average, for logging single entries average over some
120 struct io_stat avg_window[DDIR_RWDIR_CNT];
121 unsigned long avg_msec;
122 unsigned long avg_last[DDIR_RWDIR_CNT];
125 * Windowed latency histograms, for keeping track of when we need to
126 * save a copy of the histogram every approximately hist_msec
129 struct io_hist hist_window[DDIR_RWDIR_CNT];
130 unsigned long hist_msec;
131 unsigned int hist_coarseness;
133 pthread_mutex_t chunk_lock;
134 unsigned int chunk_seq;
135 struct flist_head chunk_list;
137 pthread_mutex_t deferred_free_lock;
138 #define IOLOG_MAX_DEFER 8
139 void *deferred_items[IOLOG_MAX_DEFER];
140 unsigned int deferred;
144 * If the upper bit is set, then we have the offset as well
146 #define LOG_OFFSET_SAMPLE_BIT 0x80000000U
147 #define io_sample_ddir(io) ((io)->__ddir & ~LOG_OFFSET_SAMPLE_BIT)
149 static inline void io_sample_set_ddir(struct io_log *log,
150 struct io_sample *io,
153 io->__ddir = ddir | log->log_ddir_mask;
156 static inline size_t __log_entry_sz(int log_offset)
159 return sizeof(struct io_sample_offset);
161 return sizeof(struct io_sample);
164 static inline size_t log_entry_sz(struct io_log *log)
166 return __log_entry_sz(log->log_offset);
169 static inline size_t log_sample_sz(struct io_log *log, struct io_logs *cur_log)
171 return cur_log->nr_samples * log_entry_sz(log);
/*
 * Return a pointer to entry 'sample' inside a raw sample buffer; the
 * stride depends on whether offsets are stored with each sample.
 * NOTE(review): the final parameter line was dropped by the extraction;
 * 'sample' is reconstructed as uint64_t from its use in the body.
 */
static inline struct io_sample *__get_sample(void *samples, int log_offset,
					     uint64_t sample)
{
	uint64_t sample_offset = sample * __log_entry_sz(log_offset);

	return (struct io_sample *) ((char *) samples + sample_offset);
}
181 struct io_logs *iolog_cur_log(struct io_log *);
182 uint64_t iolog_nr_samples(struct io_log *);
183 void regrow_logs(struct thread_data *);
185 static inline struct io_sample *get_sample(struct io_log *iolog,
186 struct io_logs *cur_log,
189 return __get_sample(cur_log->log, iolog->log_offset, sample);
200 * When logging io actions, this matches a single sent io_u
204 struct fio_rb_node rb_node;
205 struct flist_head list;
207 struct flist_head trim_list;
210 struct fio_file *file;
212 unsigned long long offset;
213 unsigned short numberio;
219 unsigned int file_action;
234 extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
235 extern void log_io_u(const struct thread_data *, const struct io_u *);
236 extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
237 extern bool __must_check init_iolog(struct thread_data *td);
238 extern void log_io_piece(struct thread_data *, struct io_u *);
239 extern void unlog_io_piece(struct thread_data *, struct io_u *);
240 extern void trim_io_piece(const struct io_u *);
241 extern void queue_io_piece(struct thread_data *, struct io_piece *);
242 extern void prune_io_piece_log(struct thread_data *);
243 extern void write_iolog_close(struct thread_data *);
244 extern int iolog_compress_init(struct thread_data *, struct sk_out *);
245 extern void iolog_compress_exit(struct thread_data *);
246 extern size_t log_chunk_sizes(struct io_log *);
249 extern int iolog_file_inflate(const char *);
256 struct thread_data *td;
257 unsigned long avg_msec;
258 unsigned long hist_msec;
267 static inline bool per_unit_log(struct io_log *log)
269 return log && (!log->avg_msec || log->log_gz || log->log_gz_store);
272 static inline bool inline_log(struct io_log *log)
274 return log->log_type == IO_LOG_TYPE_LAT ||
275 log->log_type == IO_LOG_TYPE_CLAT ||
276 log->log_type == IO_LOG_TYPE_SLAT;
279 static inline void ipo_bytes_align(unsigned int replay_align, struct io_piece *ipo)
284 ipo->offset &= ~(replay_align - (uint64_t)1);
287 extern void finalize_logs(struct thread_data *td, bool);
288 extern void setup_log(struct io_log **, struct log_params *, const char *);
289 extern void flush_log(struct io_log *, bool);
290 extern void flush_samples(FILE *, void *, uint64_t);
291 extern uint64_t hist_sum(int, int, uint64_t *, uint64_t *);
292 extern void free_log(struct io_log *);
293 extern void fio_writeout_logs(bool);
294 extern void td_writeout_logs(struct thread_data *, bool);
295 extern int iolog_cur_flush(struct io_log *, struct io_logs *);
297 static inline void init_ipo(struct io_piece *ipo)
299 INIT_FLIST_HEAD(&ipo->list);
300 INIT_FLIST_HEAD(&ipo->trim_list);
303 struct iolog_compress {
304 struct flist_head list;