struct group_run_stats {
	uint64_t max_run[DDIR_RWDIR_CNT], min_run[DDIR_RWDIR_CNT];
	uint64_t max_bw[DDIR_RWDIR_CNT], min_bw[DDIR_RWDIR_CNT];
	uint64_t io_kb[DDIR_RWDIR_CNT];
	uint64_t agg[DDIR_RWDIR_CNT];
	uint32_t unified_rw_rep;
} __attribute__((packed));

/*
 * How many depth levels to log
 */
#define FIO_IO_U_MAP_NR		7
#define FIO_IO_U_LAT_U_NR	10
#define FIO_IO_U_LAT_M_NR	12

/*
 * Aggregate clat samples to report percentile(s) of them.
 *
 * FIO_IO_U_PLAT_BITS determines the maximum statistical error on the
 * value of resulting percentiles. The error will be approximately
 * 1/2^(FIO_IO_U_PLAT_BITS+1) of the value.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the maximum
 * range being tracked for latency samples. The maximum value tracked
 * accurately will be 2^(GROUP_NR + PLAT_BITS - 1) microseconds.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the memory
 * requirement of storing those aggregate counts. The memory used will
 * be (FIO_IO_U_PLAT_GROUP_NR * 2^FIO_IO_U_PLAT_BITS) * sizeof(int)
 * bytes.
 *
 * FIO_IO_U_PLAT_NR is the total number of buckets.
 *
 * Suppose the clat varies from 0 to 999 (usec); the straightforward
 * method is to keep an array of (999 + 1) buckets, in which a counter
 * keeps the count of samples which fall into each bucket, e.g.
 * {[0],[1],...,[999]}. However, this consumes a huge amount of space,
 * and can be avoided if an approximation is acceptable.
 *
 * One such method is to let the range of each bucket be greater than
 * one. This method has low accuracy when the value is small. For
 * example, let the buckets be {[0,99],[100,199],...,[900,999]}, and
 * let the represented value of each bucket be the mean of its range.
 * Then a value of 0 has a round-off error of 49.5. To improve on
 * this, we use buckets with non-uniform ranges, while bounding the
 * error of each bucket within a ratio of the sample value. A simple
 * example, with error_bound = 0.005, is the buckets {
 * {[0],[1],...,[99]}, {[100,101],[102,103],...,[198,199]},..,
 * {[900,909],[910,919]...} }. The total range is partitioned into
 * groups with different ranges, then into buckets with uniform
 * ranges. An upper bound of the error is
 * (range_of_bucket/2)/value_of_bucket.
 *
 * For better efficiency, we implement this using base two. We group
 * samples by their Most Significant Bit (MSB), extract the next M
 * bits as an index within the group, and discard the remaining
 * low-order bits.
 *
 * E.g., assume a sample 'x' whose MSB is bit n (counting from bit 0),
 * and use M bits for indexing:
 *
 *	| n | M bits | bit (n-M-1) ... bit 0 |
 *
 * Because x is at least 2^n, and bits 0 to (n-M-1) contribute at most
 * (2^(n-M) - 1), discarding bits 0 to (n-M-1) gives a round-off error
 *
 *	     2^(n-M)-1      2^(n-M)      1
 *	e <= ---------  <=  -------  =  ---
 *	        2^n           2^n       2^M
 *
 * Furthermore, since we use the mean of the range to represent the
 * bucket, the error e can be lowered by half, to 1/2^(M+1). By using
 * M bits as the index, each group must contain 2^M buckets.
 *
 * E.g. let M (FIO_IO_U_PLAT_BITS) be 6;
 * the error bound is then 1/2^(6+1) = 0.0078125 (< 1%).
 *
 *	Group	MSB	#discarded	range of		#buckets
 *			error bits	value
 *	----------------------------------------------------------------
 *	...	...	...		[...,...]		...
 *	18	23	17		[8388608,+inf]**	64
 *
 * *  Special cases: when n < (M-1) or when n == (M-1), the value
 *    cannot be rounded off in either case; use all bits of the sample
 *    as the index.
 *
 * ** If a sample's MSB is greater than 23, it will be counted as 23.
 */

#define FIO_IO_U_PLAT_BITS	6
#define FIO_IO_U_PLAT_VAL	(1 << FIO_IO_U_PLAT_BITS)
#define FIO_IO_U_PLAT_GROUP_NR	19
#define FIO_IO_U_PLAT_NR	(FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
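
/*
 * A minimal sketch of the value-to-bucket mapping described above
 * (illustrative only, not fio's own implementation; the function name
 * is made up): the sample's MSB selects the group, the next
 * FIO_IO_U_PLAT_BITS bits select the bucket within the group, and the
 * remaining low-order bits are discarded. E.g. a 200 usec sample has
 * MSB 7, so it falls in group 2 and maps to index
 * 128 + ((200 >> 1) & 63) = 164.
 */
static inline unsigned int example_plat_val_to_idx(unsigned long long val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* position of the most significant set bit, counting from 0 */
	msb = val ? (sizeof(val) * 8 - 1) - __builtin_clzll(val) : 0;

	/* small values are stored exactly; the value is its own index */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return val;

	/* number of low-order bits that get discarded */
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* number of buckets that precede this group */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/* offset within the group: the M bits just below the MSB */
	offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

	/* clamp so out-of-range samples land in the last bucket */
	idx = base + offset;
	return idx < FIO_IO_U_PLAT_NR - 1 ? idx : FIO_IO_U_PLAT_NR - 1;
}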

#define FIO_IO_U_LIST_MAX_LEN 20 /* The size of the default and user-specified
					list of percentiles */

#define MAX_PATTERN_SIZE	512
#define FIO_JOBNAME_SIZE	128
#define FIO_JOBDESC_SIZE	256
#define FIO_VERROR_SIZE		128

struct thread_stat {
	char name[FIO_JOBNAME_SIZE];
	char verror[FIO_VERROR_SIZE];
	uint32_t thread_number;
	char description[FIO_JOBDESC_SIZE];
	uint32_t unified_rw_rep;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat clat_stat[DDIR_RWDIR_CNT];	/* completion latency */
	struct io_stat slat_stat[DDIR_RWDIR_CNT];	/* submission latency */
	struct io_stat lat_stat[DDIR_RWDIR_CNT];	/* total latency */
	struct io_stat bw_stat[DDIR_RWDIR_CNT];		/* bandwidth stats */
	struct io_stat iops_stat[DDIR_RWDIR_CNT];	/* IOPS stats */

	/*
	 * fio system usage accounting
	 */

	/*
	 * IO depth and latency stats
	 */
	uint64_t clat_percentiles;
	uint64_t percentile_precision;
	fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];

	uint32_t io_u_map[FIO_IO_U_MAP_NR];
	uint32_t io_u_submit[FIO_IO_U_MAP_NR];
	uint32_t io_u_complete[FIO_IO_U_MAP_NR];
	uint32_t io_u_lat_u[FIO_IO_U_LAT_U_NR];
	uint32_t io_u_lat_m[FIO_IO_U_LAT_M_NR];
	uint32_t io_u_plat[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR];

	uint64_t total_io_u[3];
	uint64_t short_io_u[3];
	uint64_t drop_io_u[3];
	uint64_t total_submit;
	uint64_t total_complete;

	uint64_t io_bytes[DDIR_RWDIR_CNT];
	uint64_t runtime[DDIR_RWDIR_CNT];
	uint64_t total_run_time;

	/*
	 * IO Error related stats
	 */
	uint16_t continue_on_error;
	uint64_t total_err_count;
	uint32_t first_error;

	uint32_t latency_depth;
	uint64_t latency_target;
	fio_fp64_t latency_percentile;
	uint64_t latency_window;
} __attribute__((packed));
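
/*
 * A minimal sketch (assumed helper name, not fio's actual accounting
 * code): how a completion-latency sample, in usec, would be folded
 * into the per-direction stats above using the bucket mapping
 * illustrated earlier.
 */
static inline void example_record_clat(struct thread_stat *ts,
				       enum fio_ddir ddir, unsigned long usec)
{
	/* one more sample in the matching latency histogram bucket */
	ts->io_u_plat[ddir][example_plat_val_to_idx(usec)]++;

	/* and one more completed io_u for this data direction */
	ts->total_io_u[ddir]++;
}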

	uint32_t nr_setting_up;

	uint32_t m_rate[DDIR_RWDIR_CNT], t_rate[DDIR_RWDIR_CNT];
	uint32_t m_iops[DDIR_RWDIR_CNT], t_iops[DDIR_RWDIR_CNT];
	uint32_t rate[DDIR_RWDIR_CNT];
	uint32_t iops[DDIR_RWDIR_CNT];
	uint64_t elapsed_sec;

	/*
	 * Network 'copy' of run_str[]
	 */
} __attribute__((packed));

extern struct fio_mutex *stat_mutex;

extern struct jobs_eta *get_jobs_eta(int force, size_t *size);

extern void stat_init(void);
extern void stat_exit(void);

extern struct json_object *show_thread_status(struct thread_stat *ts, struct group_run_stats *rs);
extern void show_group_stats(struct group_run_stats *rs);
extern int calc_thread_status(struct jobs_eta *je, int force);
extern void display_thread_status(struct jobs_eta *je);
extern void show_run_stats(void);
extern void __show_run_stats(void);
extern void __show_running_run_stats(void);
extern void show_running_run_stats(void);
extern void check_for_running_stats(void);
extern void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, int nr);
extern void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src);
extern void init_thread_stat(struct thread_stat *ts);
extern void init_group_run_stat(struct group_run_stats *gs);
extern void eta_to_str(char *str, unsigned long eta_sec);
extern int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max, double *mean, double *dev);
extern unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
					  fio_fp64_t *plist, unsigned int **output,
					  unsigned int *maxv, unsigned int *minv);
extern void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist);
extern void reset_io_stats(struct thread_data *);
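
/*
 * Illustrative sketch of the reverse direction (assumed names, not
 * fio's exported API): convert a bucket index back to its
 * representative latency - the "mean of the range" described in the
 * comment block above - and walk the histogram until the requested
 * fraction of samples has been counted, which is essentially what
 * calc_clat_percentiles() reports for each requested percentile.
 */
static inline unsigned long long example_plat_idx_to_val(unsigned int idx)
{
	unsigned int error_bits, k, base;

	/* the first two groups store exact values */
	if (idx < (FIO_IO_U_PLAT_VAL << 1))
		return idx;

	/* the group number gives the count of discarded bits ... */
	error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;

	/* ... and the smallest value belonging to the group */
	base = 1 << (error_bits + FIO_IO_U_PLAT_BITS);

	/* bucket number within the group */
	k = idx % FIO_IO_U_PLAT_VAL;

	/* midpoint of the bucket's range */
	return base + (unsigned long long) ((k + 0.5) * (1 << error_bits));
}

static inline unsigned long long example_clat_percentile(const unsigned int *io_u_plat,
							  unsigned long nr, double pct)
{
	unsigned long sum = 0, thresh = (unsigned long) (pct / 100.0 * nr + 0.5);
	unsigned int i;

	/* accumulate bucket counts until pct% of the samples are seen */
	for (i = 0; i < FIO_IO_U_PLAT_NR; i++) {
		sum += io_u_plat[i];
		if (sum && sum >= thresh)
			return example_plat_idx_to_val(i);
	}
	return 0;
}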

static inline int usec_to_msec(unsigned long *min, unsigned long *max,
			       double *mean, double *dev)
{
	/* scale to msec only when every value is above 1000 usec */
	if (*min > 1000 && *max > 1000 && *mean > 1000.0 && *dev > 1000.0) {
		*min /= 1000; *max /= 1000;
		*mean /= 1000.0; *dev /= 1000.0;
		return 0;
	}
	return 1;
}
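
/*
 * Illustrative usage only (the helper below is not part of fio): pick
 * the unit label for display depending on whether usec_to_msec()
 * scaled the values down to milliseconds.
 */
static inline const char *example_lat_unit(unsigned long *min, unsigned long *max,
					   double *mean, double *dev)
{
	/* usec_to_msec() returns 0 when the values were converted */
	return usec_to_msec(min, max, mean, dev) ? "usec" : "msec";
}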

/*
 * Worst level condensing would be 1:5, so allow enough room for that
 */
#define __THREAD_RUNSTR_SZ(nr)	((nr) * 5)
#define THREAD_RUNSTR_SZ	__THREAD_RUNSTR_SZ(thread_number)