6 #include "lib/rbtree.h"
7 #include "lib/ieee754.h"
12 * Use for maintaining statistics
25 unsigned long hist_last;
26 struct flist_head list;
30 IO_LOG_SAMPLE_AVG = 0,
35 struct io_sample_value {
40 union io_sample_data {
41 struct io_sample_value val;
42 struct io_u_plat_entry *plat_entry;
/*
 * Build a union io_sample_data holding either a plain numeric value or a
 * pointer to a latency histogram entry. Macro arguments are parenthesized
 * to avoid precedence surprises at the expansion site.
 */
#define sample_val(value)	((union io_sample_data) { .val.val0 = (value) })
#define sample_plat(plat)	((union io_sample_data) { .plat_entry = (plat) })
49 * A single data sample
53 union io_sample_data data;
59 struct io_sample_offset {
/* Initial capacity of a sample log, and the cap a log may grow to. */
#define DEF_LOG_ENTRIES	1024
#define MAX_LOG_ENTRIES	(1024 * DEF_LOG_ENTRIES)
77 struct flist_head list;
84 * Dynamically growing data sample log
88 * Entries already logged
90 struct flist_head io_logs;
94 * When the current log runs out of space, store events here until
95 * we have a chance to regrow
97 struct io_logs *pending;
99 unsigned int log_ddir_mask;
103 struct thread_data *td;
105 unsigned int log_type;
108 * If we fail extending the log, stop collecting more entries.
115 unsigned int log_offset;
120 unsigned int log_prio;
123 * Max size of log entries before a chunk is compressed
128 * Don't deflate for storing, just store the compressed bits
130 unsigned int log_gz_store;
133 * Windowed average, for logging single entries average over some
136 struct io_stat avg_window[DDIR_RWDIR_CNT];
137 unsigned long avg_msec;
138 unsigned long avg_last[DDIR_RWDIR_CNT];
141 * Windowed latency histograms, for keeping track of when we need to
142 * save a copy of the histogram every approximately hist_msec
145 struct io_hist hist_window[DDIR_RWDIR_CNT];
146 unsigned long hist_msec;
147 unsigned int hist_coarseness;
149 pthread_mutex_t chunk_lock;
150 unsigned int chunk_seq;
151 struct flist_head chunk_list;
153 pthread_mutex_t deferred_free_lock;
154 #define IOLOG_MAX_DEFER 8
155 void *deferred_items[IOLOG_MAX_DEFER];
156 unsigned int deferred;
/*
 * If the upper bit is set, then we have the offset as well
 */
#define LOG_OFFSET_SAMPLE_BIT	0x80000000U
/*
 * If the bit following the upper bit is set, then we have the priority
 */
#define LOG_PRIO_SAMPLE_BIT	0x40000000U
/*
 * If the bit following the priority sample bit is set, we report both
 * avg and max
 */
#define LOG_AVG_MAX_SAMPLE_BIT	0x20000000U

#define LOG_SAMPLE_BITS		(LOG_OFFSET_SAMPLE_BIT | LOG_PRIO_SAMPLE_BIT |\
					LOG_AVG_MAX_SAMPLE_BIT)
/* Extract the data direction stored in a sample, masking off the flag bits */
#define io_sample_ddir(io)	((io)->__ddir & ~LOG_SAMPLE_BITS)
176 static inline void io_sample_set_ddir(struct io_log *log,
177 struct io_sample *io,
180 io->__ddir = ddir | log->log_ddir_mask;
183 static inline size_t __log_entry_sz(int log_offset)
186 return sizeof(struct io_sample_offset);
188 return sizeof(struct io_sample);
191 static inline size_t log_entry_sz(struct io_log *log)
193 return __log_entry_sz(log->log_offset);
196 static inline size_t log_sample_sz(struct io_log *log, struct io_logs *cur_log)
198 return cur_log->nr_samples * log_entry_sz(log);
/*
 * Return a pointer to the sample at the given index within a raw sample
 * buffer, accounting for the per-entry size implied by log_offset.
 * NOTE(review): the final parameter line was missing from the garbled
 * source; reconstructed as uint64_t sample from the body — confirm.
 */
static inline struct io_sample *__get_sample(void *samples, int log_offset,
					     uint64_t sample)
{
	uint64_t sample_offset = sample * __log_entry_sz(log_offset);

	return (struct io_sample *) ((char *) samples + sample_offset);
}
208 struct io_logs *iolog_cur_log(struct io_log *);
209 uint64_t iolog_nr_samples(struct io_log *);
210 void regrow_logs(struct thread_data *);
211 void regrow_agg_logs(void);
213 static inline struct io_sample *get_sample(struct io_log *iolog,
214 struct io_logs *cur_log,
217 return __get_sample(cur_log->log, iolog->log_offset, sample);
228 * When logging io actions, this matches a single sent io_u
232 struct fio_rb_node rb_node;
233 struct flist_head list;
235 struct flist_head trim_list;
238 struct fio_file *file;
240 unsigned long long offset;
241 unsigned short numberio;
246 unsigned int file_action;
260 extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
261 extern void log_io_u(const struct thread_data *, const struct io_u *);
262 extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
263 extern bool __must_check init_iolog(struct thread_data *td);
264 extern void log_io_piece(struct thread_data *, struct io_u *);
265 extern void unlog_io_piece(struct thread_data *, struct io_u *);
266 extern void trim_io_piece(const struct io_u *);
267 extern void queue_io_piece(struct thread_data *, struct io_piece *);
268 extern void prune_io_piece_log(struct thread_data *);
269 extern void write_iolog_close(struct thread_data *);
270 int64_t iolog_items_to_fetch(struct thread_data *td);
271 extern int iolog_compress_init(struct thread_data *, struct sk_out *);
272 extern void iolog_compress_exit(struct thread_data *);
273 extern size_t log_chunk_sizes(struct io_log *);
274 extern int init_io_u_buffers(struct thread_data *);
275 extern unsigned long long delay_since_ttime(const struct thread_data *,
279 extern int iolog_file_inflate(const char *);
286 struct thread_data *td;
287 unsigned long avg_msec;
288 unsigned long hist_msec;
298 static inline bool per_unit_log(struct io_log *log)
300 return log && (!log->avg_msec || log->log_gz || log->log_gz_store);
303 static inline bool inline_log(struct io_log *log)
305 return log->log_type == IO_LOG_TYPE_LAT ||
306 log->log_type == IO_LOG_TYPE_CLAT ||
307 log->log_type == IO_LOG_TYPE_SLAT;
310 static inline void ipo_bytes_align(unsigned int replay_align, struct io_piece *ipo)
315 ipo->offset &= ~(replay_align - (uint64_t)1);
318 extern void finalize_logs(struct thread_data *td, bool);
319 extern void setup_log(struct io_log **, struct log_params *, const char *);
320 extern void flush_log(struct io_log *, bool);
321 extern void flush_samples(FILE *, void *, uint64_t);
322 extern uint64_t hist_sum(int, int, uint64_t *, uint64_t *);
323 extern void free_log(struct io_log *);
324 extern void fio_writeout_logs(bool);
325 extern void td_writeout_logs(struct thread_data *, bool);
326 extern int iolog_cur_flush(struct io_log *, struct io_logs *);
328 static inline void init_ipo(struct io_piece *ipo)
330 INIT_FLIST_HEAD(&ipo->list);
331 INIT_FLIST_HEAD(&ipo->trim_list);
334 struct iolog_compress {
335 struct flist_head list;