#include "lib/output_buffer.h"

struct group_run_stats {
	uint64_t max_run[DDIR_RWDIR_CNT], min_run[DDIR_RWDIR_CNT];
	uint64_t max_bw[DDIR_RWDIR_CNT], min_bw[DDIR_RWDIR_CNT];
	uint64_t io_kb[DDIR_RWDIR_CNT];
	uint64_t agg[DDIR_RWDIR_CNT];
	uint32_t unified_rw_rep;
} __attribute__((packed));

/*
 * How many depth levels to log
 */
#define FIO_IO_U_MAP_NR	7
#define FIO_IO_U_LAT_U_NR 10
#define FIO_IO_U_LAT_M_NR 12

/*
 * Aggregate clat samples to report percentile(s) of them.
 *
 * FIO_IO_U_PLAT_BITS determines the maximum statistical error on the
 * value of resulting percentiles. The error will be approximately
 * 1/2^(FIO_IO_U_PLAT_BITS+1) of the value.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the maximum
 * range being tracked for latency samples. The maximum value tracked
 * accurately will be 2^(FIO_IO_U_PLAT_GROUP_NR + FIO_IO_U_PLAT_BITS - 1)
 * microseconds.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the memory
 * requirement of storing those aggregate counts. The memory used will
 * be (FIO_IO_U_PLAT_GROUP_NR * 2^FIO_IO_U_PLAT_BITS) * sizeof(int) bytes.
 *
 * FIO_IO_U_PLAT_NR is the total number of buckets.
 *
 * Suppose the clat varies from 0 to 999 (usec), the straightforward
 * method is to keep an array of (999 + 1) buckets, in which a counter
 * keeps the count of samples which fall in the bucket, e.g.,
 * {[0],[1],...,[999]}. However this consumes a huge amount of space,
 * and can be avoided if an approximation is acceptable.
 *
 * One such method is to let the range of a bucket be greater than
 * one. This method has low accuracy when the value is small. For
 * example, let the buckets be {[0,99],[100,199],...,[900,999]}, and
 * the represented value of each bucket be the mean of the range. Then
 * a value of 0 has a round-off error of 49.5. To improve on this, we
 * use buckets with non-uniform ranges, while bounding the error of
 * each bucket within a ratio of the sample value. A simple example
 * would be when error_bound = 0.005, buckets are {
 * {[0],[1],...,[99]}, {[100,101],[102,103],...,[198,199]},..,
 * {[900,909],[910,919]...} }. The total range is partitioned into
 * groups with different ranges, then buckets with uniform ranges. An
 * upper bound of the error is (range_of_bucket/2)/value_of_bucket.
 *
 * For better efficiency, we implement this using base two. We group
 * samples by their Most Significant Bit (MSB), extract the next M bits
 * as an index within the group, and discard the rest of the bits.
 *
 * E.g., assume a sample 'x' whose MSB is bit n (starting from bit 0),
 * and use M bits for indexing
 *
 *	| n | M bits | bit (n-M-1) ... bit 0 |
 *
 * Because x is at least 2^n, and bit 0 to bit (n-M-1) is at most
 * (2^(n-M) - 1), discarding bit 0 to (n-M-1) makes the round-off
 * error
 *
 *	     2^(n-M)-1     2^(n-M)     1
 *	e <= --------- <= --------- = ---
 *	        2^n           2^n     2^M
 *
 * Furthermore, if we use the mean of the range to represent the
 * bucket, the error e can be halved to 1/2^(M+1). By using M bits as
 * the index, each group must contain 2^M buckets.
 *
 * E.g. Let M (FIO_IO_U_PLAT_BITS) be 6
 *	Error bound is 1/2^(6+1) = 0.0078125 (< 1%)
 *
 *	Group	MSB	#discarded	range of		#buckets
 *			error_bits	value
 *	----------------------------------------------------------------
 *	0*	0~5	0		[0,63]			64
 *	1*	6	0		[64,127]		64
 *	2	7	1		[128,255]		64
 *	...	...	...		[...,...]		...
 *	18	23	17		[8388608,+inf]**	64
 *
 * * Special cases: when n < (M-1) or when n == (M-1), in both cases
 *   the value cannot be rounded off. Use all bits of the sample as
 *   the index.
 *
 * ** If a sample's MSB is greater than 23, it will be counted as 23.
 */

#define FIO_IO_U_PLAT_BITS 6
#define FIO_IO_U_PLAT_VAL (1 << FIO_IO_U_PLAT_BITS)
#define FIO_IO_U_PLAT_GROUP_NR 19
#define FIO_IO_U_PLAT_NR (FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
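
/*
 * With the values above, the histogram has FIO_IO_U_PLAT_GROUP_NR *
 * FIO_IO_U_PLAT_VAL = 19 * 64 = 1216 buckets per data direction, i.e.
 * 4864 bytes of uint32_t counters each.
 *
 * Illustrative sketch of the value-to-bucket mapping described in the
 * comment above. The function name is made up for illustration only;
 * the mapping fio actually uses lives in stat.c.
 */
static inline unsigned int example_plat_val_to_idx(unsigned long long val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Most significant set bit of the sample, counted from bit 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val) * 8) - __builtin_clzll(val) - 1;

	/* Small values cannot be rounded off; use all bits as the index */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return val;

	/* Number of low-order bits that are discarded */
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* Index of the first bucket of this sample's group */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/* The M bits below the MSB select the bucket within the group */
	offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

	/* Samples beyond the last group are counted in the final bucket */
	idx = base + offset;
	return idx < FIO_IO_U_PLAT_NR - 1 ? idx : FIO_IO_U_PLAT_NR - 1;
}
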
#define FIO_IO_U_LIST_MAX_LEN 20 /* The size of the default and user-specified
					list of percentiles */

/*
 * Trim cycle count measurements
 */
#define MAX_NR_BLOCK_INFOS	8192
#define BLOCK_INFO_STATE_SHIFT	29
#define BLOCK_INFO_TRIMS(block_info)	\
	((block_info) & ((1 << BLOCK_INFO_STATE_SHIFT) - 1))
#define BLOCK_INFO_STATE(block_info)	\
	((block_info) >> BLOCK_INFO_STATE_SHIFT)
#define BLOCK_INFO(state, trim_cycles)	\
	((trim_cycles) | ((state) << BLOCK_INFO_STATE_SHIFT))
#define BLOCK_INFO_SET_STATE(block_info, state)	\
	BLOCK_INFO(state, BLOCK_INFO_TRIMS(block_info))

enum block_info_state {
	BLOCK_STATE_TRIM_FAILURE,
	BLOCK_STATE_WRITE_FAILURE,
};
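
/*
 * Usage sketch for the BLOCK_INFO packing above. The function name and
 * values are made up for illustration and are not part of fio's API:
 * the trim-cycle count lives in the low BLOCK_INFO_STATE_SHIFT bits,
 * the block state in the bits above, and the accessors undo the packing.
 */
static inline bool example_block_info_roundtrip(void)
{
	uint32_t info = BLOCK_INFO(BLOCK_STATE_TRIM_FAILURE, 7);

	/* Both accessors recover exactly what was packed in */
	return BLOCK_INFO_TRIMS(info) == 7 &&
	       BLOCK_INFO_STATE(info) == BLOCK_STATE_TRIM_FAILURE;
}
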
#define MAX_PATTERN_SIZE	512
#define FIO_JOBNAME_SIZE	128
#define FIO_JOBDESC_SIZE	256
#define FIO_VERROR_SIZE		128

struct thread_stat {
	char name[FIO_JOBNAME_SIZE];
	char verror[FIO_VERROR_SIZE];
	uint32_t thread_number;
	char description[FIO_JOBDESC_SIZE];
	uint32_t unified_rw_rep;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat clat_stat[DDIR_RWDIR_CNT]; /* completion latency */
	struct io_stat slat_stat[DDIR_RWDIR_CNT]; /* submission latency */
	struct io_stat lat_stat[DDIR_RWDIR_CNT]; /* total latency */
	struct io_stat bw_stat[DDIR_RWDIR_CNT]; /* bandwidth stats */
	struct io_stat iops_stat[DDIR_RWDIR_CNT]; /* IOPS stats */

	/*
	 * fio system usage accounting
	 */

	/*
	 * IO depth and latency stats
	 */
	uint64_t clat_percentiles;
	uint64_t percentile_precision;
	fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];

	uint32_t io_u_map[FIO_IO_U_MAP_NR];
	uint32_t io_u_submit[FIO_IO_U_MAP_NR];
	uint32_t io_u_complete[FIO_IO_U_MAP_NR];
	uint32_t io_u_lat_u[FIO_IO_U_LAT_U_NR];
	uint32_t io_u_lat_m[FIO_IO_U_LAT_M_NR];
	uint32_t io_u_plat[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR];

	uint64_t total_io_u[DDIR_RWDIR_CNT];
	uint64_t short_io_u[DDIR_RWDIR_CNT];
	uint64_t drop_io_u[DDIR_RWDIR_CNT];
	uint64_t total_submit;
	uint64_t total_complete;

	uint64_t io_bytes[DDIR_RWDIR_CNT];
	uint64_t runtime[DDIR_RWDIR_CNT];
	uint64_t total_run_time;

	/*
	 * IO Error related stats
	 */
	uint16_t continue_on_error;
	uint64_t total_err_count;
	uint32_t first_error;

	uint64_t nr_block_infos;
	uint32_t block_infos[MAX_NR_BLOCK_INFOS];

	uint32_t latency_depth;
	uint64_t latency_target;
	fio_fp64_t latency_percentile;
	uint64_t latency_window;
} __attribute__((packed));

struct jobs_eta {
	uint32_t nr_setting_up;

	uint32_t m_rate[DDIR_RWDIR_CNT], t_rate[DDIR_RWDIR_CNT];
	uint32_t m_iops[DDIR_RWDIR_CNT], t_iops[DDIR_RWDIR_CNT];
	uint32_t rate[DDIR_RWDIR_CNT];
	uint32_t iops[DDIR_RWDIR_CNT];
	uint64_t elapsed_sec;

	/*
	 * Network 'copy' of run_str[]
	 */
} __attribute__((packed));

struct io_u_plat_entry {
	struct flist_head list;
	unsigned int io_u_plat[FIO_IO_U_PLAT_NR];
};

extern struct fio_mutex *stat_mutex;

extern struct jobs_eta *get_jobs_eta(bool force, size_t *size);

extern void stat_init(void);
extern void stat_exit(void);

extern struct json_object * show_thread_status(struct thread_stat *ts, struct group_run_stats *rs, struct flist_head *, struct buf_output *);
extern void show_group_stats(struct group_run_stats *rs, struct buf_output *);
extern bool calc_thread_status(struct jobs_eta *je, int force);
extern void display_thread_status(struct jobs_eta *je);
extern void show_run_stats(void);
extern void __show_run_stats(void);
extern void __show_running_run_stats(void);
extern void show_running_run_stats(void);
extern void check_for_running_stats(void);
extern void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, bool first);
extern void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src);
extern void init_thread_stat(struct thread_stat *ts);
extern void init_group_run_stat(struct group_run_stats *gs);
extern void eta_to_str(char *str, unsigned long eta_sec);
extern bool calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max, double *mean, double *dev);
extern unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long nr, fio_fp64_t *plist, unsigned int **output, unsigned int *maxv, unsigned int *minv);
extern void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist);
extern void reset_io_stats(struct thread_data *);
extern void update_rusage_stat(struct thread_data *);
extern void clear_rusage_stat(struct thread_data *);

extern void add_lat_sample(struct thread_data *, enum fio_ddir, unsigned long,
				unsigned int, uint64_t);
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long,
				unsigned int, uint64_t);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long,
				unsigned int, uint64_t);
extern void add_agg_sample(unsigned long, enum fio_ddir, unsigned int);
extern void add_iops_sample(struct thread_data *, struct io_u *,
				unsigned int);
extern void add_bw_sample(struct thread_data *, struct io_u *,
				unsigned int, unsigned long);
extern int calc_log_samples(void);

extern struct io_log *agg_io_log[DDIR_RWDIR_CNT];
extern int write_bw_log;

static inline bool usec_to_msec(unsigned long *min, unsigned long *max,
				double *mean, double *dev)
{
	/* Only convert to msec when every value stays non-zero afterwards */
	if (*min > 1000 && *max > 1000 && *mean > 1000.0 && *dev > 1000.0) {
		*min /= 1000;
		*max /= 1000;
		*mean /= 1000.0;
		*dev /= 1000.0;
		return true;
	}

	return false;
}

/*
 * Worst level condensing would be 1:5, so allow enough room for that
 */
#define __THREAD_RUNSTR_SZ(nr)	((nr) * 5)
#define THREAD_RUNSTR_SZ	__THREAD_RUNSTR_SZ(thread_number)

uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u);