#include "lib/output_buffer.h"

struct group_run_stats {
        uint64_t max_run[DDIR_RWDIR_CNT], min_run[DDIR_RWDIR_CNT];
        uint64_t max_bw[DDIR_RWDIR_CNT], min_bw[DDIR_RWDIR_CNT];
        uint64_t iobytes[DDIR_RWDIR_CNT];
        uint64_t agg[DDIR_RWDIR_CNT];
        uint32_t unified_rw_rep;
} __attribute__((packed));

/*
 * How many depth levels to log
 */
#define FIO_IO_U_MAP_NR 7
#define FIO_IO_U_LAT_N_NR 10
#define FIO_IO_U_LAT_U_NR 10
#define FIO_IO_U_LAT_M_NR 12

/*
 * Constants for clat percentiles
 */
#define FIO_IO_U_PLAT_BITS 6
#define FIO_IO_U_PLAT_VAL (1 << FIO_IO_U_PLAT_BITS)
#define FIO_IO_U_PLAT_GROUP_NR 29
#define FIO_IO_U_PLAT_NR (FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
#define FIO_IO_U_LIST_MAX_LEN 20 /* The size of the default and user-specified
                                        list of percentiles */

/*
 * Aggregate clat samples to report percentile(s) of them.
 *
 * FIO_IO_U_PLAT_BITS determines the maximum statistical error on the
 * value of resulting percentiles. The error will be approximately
 * 1/2^(FIO_IO_U_PLAT_BITS+1) of the value.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the maximum
 * range being tracked for latency samples. The maximum value tracked
 * accurately will be 2^(GROUP_NR + PLAT_BITS - 1) nanoseconds.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the memory
 * requirement of storing those aggregate counts. The memory used will
 * be (FIO_IO_U_PLAT_GROUP_NR * 2^FIO_IO_U_PLAT_BITS) * sizeof(int)
 * bytes.
 *
 * FIO_IO_U_PLAT_NR is the total number of buckets.
 *
 * Suppose the clat varies from 0 to 999 (usec), the straightforward
 * method is to keep an array of (999 + 1) buckets, in which a counter
 * keeps the count of samples which fall in the bucket, e.g.,
 * {[0],[1],...,[999]}. However this consumes a huge amount of space,
 * and can be avoided if an approximation is acceptable.
 *
 * One such method is to let the range of the bucket be greater than
 * one. This method has low accuracy when the value is small. For
 * example, let the buckets be {[0,99],[100,199],...,[900,999]}, and
 * the represented value of each bucket be the mean of the range. Then
 * a value 0 has a round-off error of 49.5. To improve on this, we
 * use buckets with non-uniform ranges, while bounding the error of
 * each bucket within a ratio of the sample value. A simple example
 * would be when error_bound = 0.005, buckets are {
 * {[0],[1],...,[99]}, {[100,101],[102,103],...,[198,199]},..,
 * {[900,909],[910,919]...} }. The total range is partitioned into
 * groups with different ranges, then buckets with uniform ranges. An
 * upper bound of the error is (range_of_bucket/2)/value_of_bucket.
 *
 * For better efficiency, we implement this using base two. We group
 * samples by their Most Significant Bit (MSB), extract the next M bits
 * of them as an index within the group, and discard the rest of the
 * bits.
 *
 * E.g., assume a sample 'x' whose MSB is bit n (starting from bit 0),
 * and use M bits for indexing:
 *
 *        | n | M bits | bit (n-M-1) ... bit 0 |
 *
 * Because x is at least 2^n, and bit 0 to bit (n-M-1) is at most
 * (2^(n-M) - 1), discarding bit 0 to (n-M-1) makes the round-off
 * error
 *
 *           2^(n-M)-1    2^(n-M)     1
 *      e <= --------- <= ------- = ---
 *              2^n         2^n      2^M
 *
 * Furthermore, since we use the "mean" of the range to represent the
 * bucket, the error e can be lowered by half to 1 / 2^(M+1). By using
 * M bits as the index, each group must contain 2^M buckets.
 *
 * E.g. Let M (FIO_IO_U_PLAT_BITS) be 6
 *      Error bound is 1/2^(6+1) = 0.0078125 (< 1%)
 *
 *      Group   MSB     #discarded      range of                #buckets
 *                      error_bits      value
 *      ----------------------------------------------------------------
 *      0*      0~5     0               [0,63]                  64
 *      1*      6       0               [64,127]                64
 *      2       7       1               [128,255]               64
 *      3       8       2               [256,511]               64
 *      4       9       3               [512,1023]              64
 *      ...     ...     ...             [...,...]               ...
 *      28      33      27              [8589934592,+inf]**     64
 *
 *  * Special cases: when n < (M-1) or when n == (M-1), in both cases,
 *    the value cannot be rounded off. Use all bits of the sample as
 *    the index.
 *
 *  ** If a sample's MSB is greater than 33, it will be counted as 33.
 */
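
/*
 * Illustrative sketch only: the helper name below is made up for this
 * header's documentation and is not part of fio's API. It shows one way a
 * latency sample (in nanoseconds) can be mapped to its bucket index under
 * the MSB/M-bit scheme described above, using the constants defined
 * earlier in this header.
 */
static inline unsigned int example_plat_val_to_idx(unsigned long long val)
{
        unsigned int msb, error_bits, base, offset, idx;

        /* Find the sample's Most Significant Bit (counting from bit 0) */
        if (val == 0)
                msb = 0;
        else
                msb = (sizeof(val) * 8) - __builtin_clzll(val) - 1;

        /*
         * Small samples fit entirely in the index bits and cannot be
         * rounded off; use all bits of the sample as the index.
         */
        if (msb <= FIO_IO_U_PLAT_BITS)
                return (unsigned int) val;

        /* Number of low-order bits that get discarded */
        error_bits = msb - FIO_IO_U_PLAT_BITS;

        /* Number of buckets in all preceding groups */
        base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

        /* The M bits below the MSB select the bucket within the group */
        offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

        /* Clamp so samples with MSB > 33 land in the last bucket */
        idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
                (base + offset) : (FIO_IO_U_PLAT_NR - 1);

        return idx;
}
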
/*
 * Trim cycle count measurements
 */
#define MAX_NR_BLOCK_INFOS 8192
#define BLOCK_INFO_STATE_SHIFT 29
#define BLOCK_INFO_TRIMS(block_info) \
        ((block_info) & ((1 << BLOCK_INFO_STATE_SHIFT) - 1))
#define BLOCK_INFO_STATE(block_info) \
        ((block_info) >> BLOCK_INFO_STATE_SHIFT)
#define BLOCK_INFO(state, trim_cycles) \
        ((trim_cycles) | ((unsigned int) (state) << BLOCK_INFO_STATE_SHIFT))
#define BLOCK_INFO_SET_STATE(block_info, state) \
        BLOCK_INFO(state, BLOCK_INFO_TRIMS(block_info))

enum block_info_state {
        BLOCK_STATE_TRIM_FAILURE,
        BLOCK_STATE_WRITE_FAILURE,
};
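
/*
 * Usage sketch (hypothetical values, not taken from fio code): the macros
 * above pack a block's trim-cycle count into the low BLOCK_INFO_STATE_SHIFT
 * bits of a 32-bit word and its state into the remaining top bits, e.g.:
 *
 *      uint32_t bi = BLOCK_INFO(BLOCK_STATE_TRIM_FAILURE, 3);
 *
 *      BLOCK_INFO_STATE(bi)  -> BLOCK_STATE_TRIM_FAILURE
 *      BLOCK_INFO_TRIMS(bi)  -> 3
 */
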
#define MAX_PATTERN_SIZE 512
#define FIO_JOBNAME_SIZE 128
#define FIO_JOBDESC_SIZE 256
#define FIO_VERROR_SIZE 128

struct thread_stat {
        char name[FIO_JOBNAME_SIZE];
        char verror[FIO_VERROR_SIZE];
        uint32_t thread_number;
        char description[FIO_JOBDESC_SIZE];
        uint32_t unified_rw_rep;

        /*
         * bandwidth and latency stats
         */
        struct io_stat sync_stat __attribute__((aligned(8)));/* fsync etc stats */
        struct io_stat clat_stat[DDIR_RWDIR_CNT]; /* completion latency */
        struct io_stat slat_stat[DDIR_RWDIR_CNT]; /* submission latency */
        struct io_stat lat_stat[DDIR_RWDIR_CNT]; /* total latency */
        struct io_stat bw_stat[DDIR_RWDIR_CNT]; /* bandwidth stats */
        struct io_stat iops_stat[DDIR_RWDIR_CNT]; /* IOPS stats */

        /*
         * fio system usage accounting
         */

        /*
         * IO depth and latency stats
         */
        uint32_t clat_percentiles;
        uint32_t lat_percentiles;
        uint64_t percentile_precision;
        fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];

        uint64_t io_u_map[FIO_IO_U_MAP_NR];
        uint64_t io_u_submit[FIO_IO_U_MAP_NR];
        uint64_t io_u_complete[FIO_IO_U_MAP_NR];
        uint64_t io_u_lat_n[FIO_IO_U_LAT_N_NR];
        uint64_t io_u_lat_u[FIO_IO_U_LAT_U_NR];
        uint64_t io_u_lat_m[FIO_IO_U_LAT_M_NR];
        uint64_t io_u_plat[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR];
        uint64_t io_u_sync_plat[FIO_IO_U_PLAT_NR];

        uint64_t total_io_u[DDIR_RWDIR_SYNC_CNT];
        uint64_t short_io_u[DDIR_RWDIR_CNT];
        uint64_t drop_io_u[DDIR_RWDIR_CNT];
        uint64_t total_submit;
        uint64_t total_complete;

        uint64_t io_bytes[DDIR_RWDIR_CNT];
        uint64_t runtime[DDIR_RWDIR_CNT];
        uint64_t total_run_time;

        /*
         * IO Error related stats
         */
        uint16_t continue_on_error;
        uint32_t first_error;
        uint64_t total_err_count;

        uint64_t nr_zone_resets;

        uint64_t nr_block_infos;
        uint32_t block_infos[MAX_NR_BLOCK_INFOS];

        uint32_t latency_depth;
        uint64_t latency_target;
        fio_fp64_t latency_percentile;
        uint64_t latency_window;

        fio_fp64_t ss_deviation;
        fio_fp64_t ss_criterion;

        uint64_t *ss_iops_data;
        uint64_t *ss_bw_data;
} __attribute__((packed));

#define JOBS_ETA {                                              \
        uint32_t nr_running;                                    \
        uint32_t nr_pending;                                    \
        uint32_t nr_setting_up;                                 \
        uint64_t m_rate[DDIR_RWDIR_CNT];                        \
        uint64_t t_rate[DDIR_RWDIR_CNT];                        \
        uint64_t rate[DDIR_RWDIR_CNT];                          \
        uint32_t m_iops[DDIR_RWDIR_CNT];                        \
        uint32_t t_iops[DDIR_RWDIR_CNT];                        \
        uint32_t iops[DDIR_RWDIR_CNT];                          \
        uint64_t elapsed_sec;                                   \
        uint32_t unit_base;                                     \
        uint32_t files_open;                                    \
        /*                                                      \
         * Network 'copy' of run_str[]                          \
         */                                                     \
        uint32_t nr_threads;                                    \
        uint8_t run_str[];                                      \
}

struct jobs_eta JOBS_ETA;
struct jobs_eta_packed JOBS_ETA __attribute__((packed));

struct io_u_plat_entry {
        struct flist_head list;
        uint64_t io_u_plat[FIO_IO_U_PLAT_NR];
};

extern struct fio_sem *stat_sem;

extern struct jobs_eta *get_jobs_eta(bool force, size_t *size);

extern void stat_init(void);
extern void stat_exit(void);

extern struct json_object * show_thread_status(struct thread_stat *ts, struct group_run_stats *rs, struct flist_head *, struct buf_output *);
extern void show_group_stats(struct group_run_stats *rs, struct buf_output *);
extern bool calc_thread_status(struct jobs_eta *je, int force);
extern void display_thread_status(struct jobs_eta *je);
extern void __show_run_stats(void);
extern void __show_running_run_stats(void);
extern void show_running_run_stats(void);
extern void check_for_running_stats(void);
extern void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, bool first);
extern void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src);
extern void init_thread_stat(struct thread_stat *ts);
extern void init_group_run_stat(struct group_run_stats *gs);
extern void eta_to_str(char *str, unsigned long eta_sec);
extern bool calc_lat(struct io_stat *is, unsigned long long *min, unsigned long long *max, double *mean, double *dev);
extern unsigned int calc_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr, fio_fp64_t *plist, unsigned long long **output, unsigned long long *maxv, unsigned long long *minv);
extern void stat_calc_lat_n(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_dist(uint64_t *map, unsigned long total, double *io_u_dist);
extern void reset_io_stats(struct thread_data *);
extern void update_rusage_stat(struct thread_data *);
extern void clear_rusage_stat(struct thread_data *);

extern void add_lat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
                                unsigned long long, uint64_t);
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
                                unsigned long long, uint64_t);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long,
                                unsigned long long, uint64_t);
extern void add_agg_sample(union io_sample_data, enum fio_ddir, unsigned long long);
extern void add_iops_sample(struct thread_data *, struct io_u *,
                                unsigned int);
extern void add_bw_sample(struct thread_data *, struct io_u *,
                                unsigned int, unsigned long long);
extern void add_sync_clat_sample(struct thread_stat *ts,
                                unsigned long long nsec);
extern int calc_log_samples(void);

extern struct io_log *agg_io_log[DDIR_RWDIR_CNT];
extern bool write_bw_log;
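
/*
 * Usage sketch (hypothetical caller, not fio code): computing the latency
 * values at the percentiles a job asked for, from the aggregated bucket
 * counts. calc_clat_percentiles() fills in an output array that the caller
 * is expected to free:
 *
 *      unsigned long long *ovals = NULL, maxv, minv;
 *      unsigned int i, len;
 *
 *      len = calc_clat_percentiles(ts->io_u_plat[DDIR_READ],
 *                                  ts->total_io_u[DDIR_READ],
 *                                  ts->percentile_list, &ovals, &maxv, &minv);
 *      for (i = 0; i < len; i++)
 *              printf("%.2fth: %llu nsec\n", ts->percentile_list[i].u.f, ovals[i]);
 *      free(ovals);
 */
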
static inline bool nsec_to_usec(unsigned long long *min,
                                unsigned long long *max, double *mean,
                                double *dev)
{
        if (*min > 2000 && *max > 99999 && *dev > 1000.0) {
                *min /= 1000;
                *max /= 1000;
                *mean /= 1000.0;
                *dev /= 1000.0;
                return true;
        }

        return false;
}

static inline bool nsec_to_msec(unsigned long long *min,
                                unsigned long long *max, double *mean,
                                double *dev)
{
        if (*min > 2000000 && *max > 99999999ULL && *dev > 1000000.0) {
                *min /= 1000000;
                *max /= 1000000;
                *mean /= 1000000.0;
                *dev /= 1000000.0;
                return true;
        }

        return false;
}
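
/*
 * Usage sketch (hypothetical caller, not fio code): the helpers above scale
 * a min/max/mean/dev set down to usec or msec only when every value is large
 * enough to survive the division, so display code can pick a unit:
 *
 *      unsigned long long min, max;
 *      double mean, dev;
 *
 *      if (calc_lat(&ts->clat_stat[DDIR_READ], &min, &max, &mean, &dev)) {
 *              const char *unit = "nsec";
 *
 *              if (nsec_to_msec(&min, &max, &mean, &dev))
 *                      unit = "msec";
 *              else if (nsec_to_usec(&min, &max, &mean, &dev))
 *                      unit = "usec";
 *              ...
 *      }
 */
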
/*
 * Worst level condensing would be 1:5, so allow enough room for that
 */
#define __THREAD_RUNSTR_SZ(nr) ((nr) * 5)
#define THREAD_RUNSTR_SZ __THREAD_RUNSTR_SZ(thread_number)

uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u);