Add regression test for recent offload locking bug
[fio.git] / iolog.h
... / ...
CommitLineData
1#ifndef FIO_IOLOG_H
2#define FIO_IOLOG_H
3
4#include <stdio.h>
5
6#include "lib/rbtree.h"
7#include "lib/ieee754.h"
8#include "flist.h"
9#include "ioengines.h"
10
/*
 * Use for maintaining statistics
 *
 * Tracks min/max/count plus running mean and S accumulator for a stream
 * of 64-bit samples (S presumably holds the Welford-style sum of squared
 * deviations used to derive stddev — confirm in stat.c).
 */
struct io_stat {
	uint64_t max_val;	/* largest sample seen */
	uint64_t min_val;	/* smallest sample seen */
	uint64_t samples;	/* number of samples accumulated */

	fio_fp64_t mean;	/* running mean of samples */
	fio_fp64_t S;		/* running variance accumulator */
};
22
/*
 * One windowed latency histogram: sample count, the time the window was
 * last flushed, and a list linkage (presumably of io_u_plat_entry copies
 * saved per window — confirm against hist_window usage in iolog.c).
 */
struct io_hist {
	uint64_t samples;		/* samples accumulated in this window */
	unsigned long hist_last;	/* timestamp of last histogram save */
	struct flist_head list;		/* saved histogram entries */
};
28
29
/*
 * Payload of one sample: either a plain 64-bit value, or a pointer to a
 * histogram entry (for IO_LOG_TYPE_HIST logs).
 */
union io_sample_data {
	uint64_t val;
	struct io_u_plat_entry *plat_entry;
};
34
/* Convenience constructors for union io_sample_data via compound literals */
#define sample_val(value) ((union io_sample_data) { .val = value })
#define sample_plat(plat) ((union io_sample_data) { .plat_entry = plat })
37
/*
 * A single data sample
 */
struct io_sample {
	uint64_t time;			/* when the sample was taken */
	union io_sample_data data;	/* value or histogram pointer */
	uint32_t __ddir;		/* data direction, possibly OR-ed with
					 * LOG_OFFSET_SAMPLE_BIT; read it via
					 * io_sample_ddir() */
	uint64_t bs;			/* block size of the I/O — presumably
					 * bytes; confirm at logging call sites */
};
47
/*
 * Extended sample carrying the I/O offset as well; used when log_offset
 * is enabled (see __log_entry_sz()). Must begin with struct io_sample so
 * the two layouts can share __get_sample().
 */
struct io_sample_offset {
	struct io_sample s;
	uint64_t offset;
};
52
/*
 * Kinds of logs fio can produce. Starts at 1, presumably so that 0 can
 * denote "no type set".
 */
enum {
	IO_LOG_TYPE_LAT = 1,	/* total latency */
	IO_LOG_TYPE_CLAT,	/* completion latency */
	IO_LOG_TYPE_SLAT,	/* submission latency */
	IO_LOG_TYPE_BW,		/* bandwidth */
	IO_LOG_TYPE_IOPS,	/* IOPS */
	IO_LOG_TYPE_HIST,	/* latency histogram */
};
61
/* Initial chunk size of a sample log, and the cap a chunk may grow to */
#define DEF_LOG_ENTRIES	1024
#define MAX_LOG_ENTRIES	(1024 * DEF_LOG_ENTRIES)
64
/*
 * One chunk of a sample log: a contiguous array of samples (entry size
 * depends on log_offset — see log_entry_sz()) linked into io_log.io_logs.
 */
struct io_logs {
	struct flist_head list;		/* linkage on io_log->io_logs */
	uint64_t nr_samples;		/* samples currently stored */
	uint64_t max_samples;		/* capacity of this chunk */
	void *log;			/* sample array */
};
71
/*
 * Dynamically growing data sample log
 */
struct io_log {
	/*
	 * Entries already logged: list of struct io_logs chunks
	 */
	struct flist_head io_logs;
	uint32_t cur_log_max;	/* capacity of the chunk currently being
				 * filled — presumably; confirm in iolog.c */

	/*
	 * When the current log runs out of space, store events here until
	 * we have a chance to regrow
	 */
	struct io_logs *pending;

	/* OR-ed into each sample's __ddir; holds LOG_OFFSET_SAMPLE_BIT
	 * when offsets are being logged (see io_sample_set_ddir()) */
	unsigned int log_ddir_mask;

	/* file the log is eventually written out to */
	char *filename;

	/* thread this log belongs to */
	struct thread_data *td;

	/* one of IO_LOG_TYPE_* */
	unsigned int log_type;

	/*
	 * If we fail extending the log, stop collecting more entries.
	 */
	bool disabled;

	/*
	 * Log offsets: nonzero when each entry is a struct
	 * io_sample_offset instead of a plain struct io_sample
	 */
	unsigned int log_offset;

	/*
	 * Max size of log entries before a chunk is compressed
	 */
	unsigned int log_gz;

	/*
	 * Don't deflate for storing, just store the compressed bits
	 */
	unsigned int log_gz_store;

	/*
	 * Windowed average, for logging single entries average over some
	 * period of time.
	 */
	struct io_stat avg_window[DDIR_RWDIR_CNT];
	unsigned long avg_msec;			/* window length, msec */
	unsigned long avg_last[DDIR_RWDIR_CNT];	/* last window rollover */

	/*
	 * Windowed latency histograms, for keeping track of when we need to
	 * save a copy of the histogram every approximately hist_msec
	 * milliseconds.
	 */
	struct io_hist hist_window[DDIR_RWDIR_CNT];
	unsigned long hist_msec;
	unsigned int hist_coarseness;

	/* state for compressed chunks (see struct iolog_compress) */
	pthread_mutex_t chunk_lock;
	unsigned int chunk_seq;
	struct flist_head chunk_list;

	/* small FIFO of items whose freeing has been deferred */
	pthread_mutex_t deferred_free_lock;
#define IOLOG_MAX_DEFER	8
	void *deferred_items[IOLOG_MAX_DEFER];
	unsigned int deferred;
};
142
/*
 * If the upper bit is set, then we have the offset as well
 */
#define LOG_OFFSET_SAMPLE_BIT	0x80000000U
/* extract the data direction from a sample, stripping the offset flag */
#define io_sample_ddir(io)	((io)->__ddir & ~LOG_OFFSET_SAMPLE_BIT)
148
149static inline void io_sample_set_ddir(struct io_log *log,
150 struct io_sample *io,
151 enum fio_ddir ddir)
152{
153 io->__ddir = ddir | log->log_ddir_mask;
154}
155
156static inline size_t __log_entry_sz(int log_offset)
157{
158 if (log_offset)
159 return sizeof(struct io_sample_offset);
160 else
161 return sizeof(struct io_sample);
162}
163
164static inline size_t log_entry_sz(struct io_log *log)
165{
166 return __log_entry_sz(log->log_offset);
167}
168
169static inline size_t log_sample_sz(struct io_log *log, struct io_logs *cur_log)
170{
171 return cur_log->nr_samples * log_entry_sz(log);
172}
173
/*
 * Index into a raw sample array. Entry stride depends on whether offsets
 * are logged, so plain pointer arithmetic on struct io_sample won't do.
 */
static inline struct io_sample *__get_sample(void *samples, int log_offset,
					     uint64_t sample)
{
	char *base = samples;
	uint64_t byte_offset = sample * __log_entry_sz(log_offset);

	return (struct io_sample *)(base + byte_offset);
}
180
181struct io_logs *iolog_cur_log(struct io_log *);
182uint64_t iolog_nr_samples(struct io_log *);
183void regrow_logs(struct thread_data *);
184
185static inline struct io_sample *get_sample(struct io_log *iolog,
186 struct io_logs *cur_log,
187 uint64_t sample)
188{
189 return __get_sample(cur_log->log, iolog->log_offset, sample);
190}
191
/*
 * io_piece flag bits (stored in io_piece.flags)
 */
enum {
	IP_F_ONRB	= 1,	/* linked on the rb tree */
	IP_F_ONLIST	= 2,	/* linked on the flat list */
	IP_F_TRIMMED	= 4,	/* piece has been trimmed */
	IP_F_IN_FLIGHT	= 8,	/* I/O for this piece is outstanding */
};
198
/*
 * When logging io actions, this matches a single sent io_u
 */
struct io_piece {
	/* linkage: on either the rb tree or the flat list, never both
	 * (which one is indicated by IP_F_ONRB / IP_F_ONLIST) */
	union {
		struct fio_rb_node rb_node;
		struct flist_head list;
	};
	struct flist_head trim_list;	/* linkage for trim tracking */
	/* target file, by index or by pointer — which member is valid
	 * presumably depends on replay context; confirm in iolog.c */
	union {
		int fileno;
		struct fio_file *file;
	};
	unsigned long long offset;	/* file offset of the I/O */
	unsigned short numberio;	/* sequence number of the io_u */
	unsigned long len;		/* length in bytes */
	unsigned int flags;		/* IP_F_* bits */
	enum fio_ddir ddir;		/* data direction */
	/* replay payload: delay for normal I/O, or the action code for
	 * file events (enum file_log_act) */
	union {
		unsigned long delay;
		unsigned int file_action;
	};
};
222
/*
 * Log exports
 *
 * File-level actions recorded in (and replayed from) an I/O log.
 */
enum file_log_act {
	FIO_LOG_ADD_FILE,
	FIO_LOG_OPEN_FILE,
	FIO_LOG_CLOSE_FILE,
	FIO_LOG_UNLINK_FILE,
};
232
233struct io_u;
234extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
235extern void log_io_u(const struct thread_data *, const struct io_u *);
236extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
237extern bool __must_check init_iolog(struct thread_data *td);
238extern void log_io_piece(struct thread_data *, struct io_u *);
239extern void unlog_io_piece(struct thread_data *, struct io_u *);
240extern void trim_io_piece(const struct io_u *);
241extern void queue_io_piece(struct thread_data *, struct io_piece *);
242extern void prune_io_piece_log(struct thread_data *);
243extern void write_iolog_close(struct thread_data *);
244extern int iolog_compress_init(struct thread_data *, struct sk_out *);
245extern void iolog_compress_exit(struct thread_data *);
246extern size_t log_chunk_sizes(struct io_log *);
247extern int init_io_u_buffers(struct thread_data *);
248
249#ifdef CONFIG_ZLIB
250extern int iolog_file_inflate(const char *);
251#endif
252
/*
 * Logging
 *
 * Parameter bundle passed to setup_log() to configure a new io_log;
 * fields mirror the identically-named members of struct io_log.
 */
struct log_params {
	struct thread_data *td;		/* owning thread */
	unsigned long avg_msec;		/* averaging window, msec */
	unsigned long hist_msec;	/* histogram window, msec */
	int hist_coarseness;
	int log_type;			/* IO_LOG_TYPE_* */
	int log_offset;			/* also log per-sample offsets */
	int log_gz;			/* compress chunks at this size */
	int log_gz_store;		/* store compressed, don't inflate */
	int log_compress;		/* enable compression — presumably
					 * gates log_gz; confirm in iolog.c */
};
267
268static inline bool per_unit_log(struct io_log *log)
269{
270 return log && (!log->avg_msec || log->log_gz || log->log_gz_store);
271}
272
273static inline bool inline_log(struct io_log *log)
274{
275 return log->log_type == IO_LOG_TYPE_LAT ||
276 log->log_type == IO_LOG_TYPE_CLAT ||
277 log->log_type == IO_LOG_TYPE_SLAT;
278}
279
280static inline void ipo_bytes_align(unsigned int replay_align, struct io_piece *ipo)
281{
282 if (!replay_align)
283 return;
284
285 ipo->offset &= ~(replay_align - (uint64_t)1);
286}
287
288extern void finalize_logs(struct thread_data *td, bool);
289extern void setup_log(struct io_log **, struct log_params *, const char *);
290extern void flush_log(struct io_log *, bool);
291extern void flush_samples(FILE *, void *, uint64_t);
292extern uint64_t hist_sum(int, int, uint64_t *, uint64_t *);
293extern void free_log(struct io_log *);
294extern void fio_writeout_logs(bool);
295extern void td_writeout_logs(struct thread_data *, bool);
296extern int iolog_cur_flush(struct io_log *, struct io_logs *);
297
298static inline void init_ipo(struct io_piece *ipo)
299{
300 INIT_FLIST_HEAD(&ipo->list);
301 INIT_FLIST_HEAD(&ipo->trim_list);
302}
303
/*
 * One compressed chunk of a log, linked on io_log->chunk_list. seq is
 * presumably assigned from io_log->chunk_seq so chunks can be reassembled
 * in order — confirm in iolog.c.
 */
struct iolog_compress {
	struct flist_head list;	/* linkage on io_log->chunk_list */
	void *buf;		/* compressed data */
	size_t len;		/* length of buf in bytes */
	unsigned int seq;	/* chunk sequence number */
};
310
311#endif