#include "lib/output_buffer.h"
struct group_run_stats {
	uint64_t max_run[DDIR_RWDIR_CNT], min_run[DDIR_RWDIR_CNT];
	uint64_t max_bw[DDIR_RWDIR_CNT], min_bw[DDIR_RWDIR_CNT];
	uint64_t iobytes[DDIR_RWDIR_CNT];
	uint64_t agg[DDIR_RWDIR_CNT];
	uint32_t unified_rw_rep;
} __attribute__((packed));

/*
 * How many depth levels to log
 */
#define FIO_IO_U_MAP_NR		7
#define FIO_IO_U_LAT_N_NR	10
#define FIO_IO_U_LAT_U_NR	10
#define FIO_IO_U_LAT_M_NR	12

/*
 * Constants for clat percentiles
 */
#define FIO_IO_U_PLAT_BITS	6
#define FIO_IO_U_PLAT_VAL	(1 << FIO_IO_U_PLAT_BITS)
#define FIO_IO_U_PLAT_GROUP_NR	29
#define FIO_IO_U_PLAT_NR	(FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
#define FIO_IO_U_LIST_MAX_LEN	20 /* The size of the default and user-specified
					list of percentiles */
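
/*
 * With the values above, each histogram has 29 * 64 = 1856 buckets
 * (FIO_IO_U_PLAT_NR), tracks samples up to 2^(29 + 6 - 1) = 2^34 nsec
 * (about 17.2 s) before they saturate into the last bucket, and costs
 * 1856 * sizeof(uint64_t) = 14.5 KiB per array.
 */
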
/*
 * Aggregate latency samples for reporting percentile(s).
 *
 * FIO_IO_U_PLAT_BITS determines the maximum statistical error on the
 * value of resulting percentiles. The error will be approximately
 * 1/2^(FIO_IO_U_PLAT_BITS+1) of the value.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the maximum
 * range being tracked for latency samples. The maximum value tracked
 * accurately will be 2^(FIO_IO_U_PLAT_GROUP_NR + FIO_IO_U_PLAT_BITS - 1)
 * nanoseconds.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS also determine the
 * memory requirement of storing those aggregate counts. The memory
 * used will be (FIO_IO_U_PLAT_GROUP_NR * 2^FIO_IO_U_PLAT_BITS)
 * * sizeof(uint64_t) bytes.
 *
 * FIO_IO_U_PLAT_NR is the total number of buckets.
 *
 * Suppose the lat varies from 0 to 999 (usec), the straightforward
 * method is to keep an array of (999 + 1) buckets, in which a counter
 * keeps the count of samples which fall in the bucket, e.g.,
 * {[0],[1],...,[999]}. However this consumes a huge amount of space,
 * and can be avoided if an approximation is acceptable.
 *
 * One such method is to let the range of the bucket be greater than
 * one. This method has low accuracy when the value is small. For
 * example, let the buckets be {[0,99],[100,199],...,[900,999]}, and
 * the represented value of each bucket be the mean of the range. Then
 * a value 0 has a round-off error of 49.5. To improve on this, we
 * use buckets with non-uniform ranges, while bounding the error of
 * each bucket within a ratio of the sample value. A simple example
 * would be when error_bound = 0.005, buckets are {
 * {[0],[1],...,[99]}, {[100,101],[102,103],...,[198,199]},..,
 * {[900,909],[910,919]...} }. The total range is partitioned into
 * groups with different ranges, then buckets with uniform ranges. An
 * upper bound of the error is (range_of_bucket/2)/value_of_bucket.
 *
 * For better efficiency, we implement this using base two. We group
 * samples by their Most Significant Bit (MSB), extract the next M bits
 * as an index within the group, and discard the rest of the bits.
 *
 * E.g., assume a sample 'x' whose MSB is bit n (starting from bit 0),
 * and use M bits for indexing:
 *
 *        | n |    M bits   | bit (n-M-1) ... bit 0 |
 *
 * Because x is at least 2^n, and bit 0 to bit (n-M-1) is at most
 * (2^(n-M) - 1), discarding bit 0 to (n-M-1) makes the round-off
 * error
 *
 *           2^(n-M)-1    2^(n-M)     1
 *      e <= --------- <= -------- = ---
 *              2^n         2^n      2^M
 *
 * Furthermore, since we use the "mean" of the range to represent the
 * bucket, the error e can be lowered by half to 1 / 2^(M+1). By using
 * M bits as the index, each group must contain 2^M buckets.
 *
 * E.g. Let M (FIO_IO_U_PLAT_BITS) be 6
 *      Error bound is 1/2^(6+1) = 0.0078125 (< 1%)
 *
 *  Group   MSB     #discarded      range of                #buckets
 *                  error_bits      value
 *  ------------------------------------------------------------------
 *  0*      0~5     0               [0,63]                  64
 *  1*      6       0               [64,127]                64
 *  2       7       1               [128,255]               64
 *  3       8       2               [256,511]               64
 *  4       9       3               [512,1023]              64
 *  ...     ...     ...             [...,...]               ...
 *  28      33      27              [8589934592,+inf]**     64
 *
 *  * Special cases: when n < (M-1) or when n == (M-1), in both cases,
 *    the value cannot be rounded off. Use all bits of the sample as
 *    index.
 *
 *  ** If a sample's MSB is greater than 33, it will be counted as 33.
 */
/*
 * Trim cycle count measurements
 */
#define MAX_NR_BLOCK_INFOS	8192
#define BLOCK_INFO_STATE_SHIFT	29
#define BLOCK_INFO_TRIMS(block_info)	\
	((block_info) & ((1 << BLOCK_INFO_STATE_SHIFT) - 1))
#define BLOCK_INFO_STATE(block_info)		\
	((block_info) >> BLOCK_INFO_STATE_SHIFT)
#define BLOCK_INFO(state, trim_cycles)	\
	((trim_cycles) | ((unsigned int) (state) << BLOCK_INFO_STATE_SHIFT))
#define BLOCK_INFO_SET_STATE(block_info, state)	\
	BLOCK_INFO(state, BLOCK_INFO_TRIMS(block_info))

enum block_info_state {
	BLOCK_STATE_UNINIT,
	BLOCK_STATE_TRIMMED,
	BLOCK_STATE_WRITTEN,
	BLOCK_STATE_TRIM_FAILURE,
	BLOCK_STATE_WRITE_FAILURE,
	BLOCK_STATE_COUNT,
};
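
/*
 * Usage sketch (illustrative, not fio source): a block_info word packs
 * the trim cycle count into the low 29 bits and a block_info_state
 * into the top 3 bits.
 *
 *	uint32_t bi = BLOCK_INFO(BLOCK_STATE_TRIMMED, 5);
 *
 *	BLOCK_INFO_STATE(bi) == BLOCK_STATE_TRIMMED
 *	BLOCK_INFO_TRIMS(bi) == 5
 *
 *	Updating the state preserves the trim count:
 *	bi = BLOCK_INFO_SET_STATE(bi, BLOCK_STATE_WRITTEN);
 */
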
#define MAX_PATTERN_SIZE	512
#define FIO_JOBNAME_SIZE	128
#define FIO_JOBDESC_SIZE	256
#define FIO_VERROR_SIZE		128
#define UNIFIED_SPLIT		0
#define UNIFIED_MIXED		1
#define UNIFIED_BOTH		2

struct clat_prio_stat {
	uint64_t io_u_plat[FIO_IO_U_PLAT_NR];
	struct io_stat clat_stat;
};

struct thread_stat {
	char name[FIO_JOBNAME_SIZE];
	char verror[FIO_VERROR_SIZE];
	uint32_t thread_number;
	char description[FIO_JOBDESC_SIZE];
	uint32_t unified_rw_rep;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat sync_stat __attribute__((aligned(8)));	/* fsync etc stats */
	struct io_stat clat_stat[DDIR_RWDIR_CNT];	/* completion latency */
	struct io_stat slat_stat[DDIR_RWDIR_CNT];	/* submission latency */
	struct io_stat lat_stat[DDIR_RWDIR_CNT];	/* total latency */
	struct io_stat bw_stat[DDIR_RWDIR_CNT];		/* bandwidth stats */
	struct io_stat iops_stat[DDIR_RWDIR_CNT];	/* IOPS stats */

	/*
	 * fio system usage accounting
	 */

	/*
	 * IO depth and latency stats
	 */
	uint32_t clat_percentiles;
	uint32_t lat_percentiles;
	uint32_t slat_percentiles;
	uint64_t percentile_precision;
	fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];

	uint64_t io_u_map[FIO_IO_U_MAP_NR];
	uint64_t io_u_submit[FIO_IO_U_MAP_NR];
	uint64_t io_u_complete[FIO_IO_U_MAP_NR];
	uint64_t io_u_lat_n[FIO_IO_U_LAT_N_NR];
	uint64_t io_u_lat_u[FIO_IO_U_LAT_U_NR];
	uint64_t io_u_lat_m[FIO_IO_U_LAT_M_NR];
	uint64_t io_u_plat[FIO_LAT_CNT][DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR];
	uint64_t io_u_sync_plat[FIO_IO_U_PLAT_NR];

	uint64_t total_io_u[DDIR_RWDIR_SYNC_CNT];
	uint64_t short_io_u[DDIR_RWDIR_CNT];
	uint64_t drop_io_u[DDIR_RWDIR_CNT];
	uint64_t total_submit;
	uint64_t total_complete;

	uint64_t io_bytes[DDIR_RWDIR_CNT];
	uint64_t runtime[DDIR_RWDIR_CNT];
	uint64_t total_run_time;

	/*
	 * IO Error related stats
	 */
	uint16_t continue_on_error;
	uint32_t first_error;
	uint64_t total_err_count;

	uint64_t nr_zone_resets;

	uint64_t nr_block_infos;
	uint32_t block_infos[MAX_NR_BLOCK_INFOS];

	uint32_t latency_depth;
	uint64_t latency_target;
	fio_fp64_t latency_percentile;
	uint64_t latency_window;

	fio_fp64_t ss_deviation;
	fio_fp64_t ss_criterion;

	/* A mirror of td->ioprio. */
	uint32_t ioprio;

	uint64_t io_u_plat_high_prio[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR] __attribute__((aligned(8)));
	uint64_t io_u_plat_low_prio[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR];
	struct io_stat clat_high_prio_stat[DDIR_RWDIR_CNT] __attribute__((aligned(8)));
	struct io_stat clat_low_prio_stat[DDIR_RWDIR_CNT];

	union {
		uint64_t *ss_iops_data;
		/*
		 * For FIO_NET_CMD_TS, the pointed to data will temporarily
		 * be stored at this offset from the start of the payload.
		 */
		uint64_t ss_iops_data_offset;
	};

	union {
		uint64_t *ss_bw_data;
		/*
		 * For FIO_NET_CMD_TS, the pointed to data will temporarily
		 * be stored at this offset from the start of the payload.
		 */
		uint64_t ss_bw_data_offset;
	};

	union {
		struct clat_prio_stat *clat_prio[DDIR_RWDIR_CNT];
		/*
		 * For FIO_NET_CMD_TS, the pointed to data will temporarily
		 * be stored at this offset from the start of the payload.
		 */
		uint64_t clat_prio_offset[DDIR_RWDIR_CNT];
	};

	uint32_t nr_clat_prio[DDIR_RWDIR_CNT];
} __attribute__((packed));

#define JOBS_ETA							\
	uint32_t nr_running;						\
									\
	uint32_t nr_pending;						\
	uint32_t nr_setting_up;						\
									\
	uint64_t m_rate[DDIR_RWDIR_CNT];				\
	uint64_t t_rate[DDIR_RWDIR_CNT];				\
	uint64_t rate[DDIR_RWDIR_CNT];					\
	uint32_t m_iops[DDIR_RWDIR_CNT];				\
	uint32_t t_iops[DDIR_RWDIR_CNT];				\
	uint32_t iops[DDIR_RWDIR_CNT];					\
									\
	uint64_t elapsed_sec;						\
									\
	uint32_t unit_base;						\
									\
	uint32_t files_open;						\
									\
	/*								\
	 * Network 'copy' of run_str[]					\
	 */								\
	uint32_t nr_threads;						\
	uint8_t run_str[]

struct jobs_eta {
	JOBS_ETA;
};

struct jobs_eta_packed {
	JOBS_ETA;
} __attribute__((packed));

struct io_u_plat_entry {
	struct flist_head list;
	uint64_t io_u_plat[FIO_IO_U_PLAT_NR];
};

extern struct fio_sem *stat_sem;

extern struct jobs_eta *get_jobs_eta(bool force, size_t *size);
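
/*
 * Usage sketch (illustrative; assumes the returned buffer is owned and
 * freed by the caller): poll the current ETA state of all jobs.
 *
 *	size_t size;
 *	struct jobs_eta *je = get_jobs_eta(false, &size);
 *
 *	if (je) {
 *		printf("%u jobs running, %llu sec elapsed\n",
 *			(unsigned int) je->nr_running,
 *			(unsigned long long) je->elapsed_sec);
 *		free(je);
 *	}
 */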

extern void stat_init(void);
extern void stat_exit(void);

extern struct json_object * show_thread_status(struct thread_stat *ts, struct group_run_stats *rs, struct flist_head *, struct buf_output *);
extern void show_group_stats(struct group_run_stats *rs, struct buf_output *);
extern bool calc_thread_status(struct jobs_eta *je, int force);
extern void display_thread_status(struct jobs_eta *je);
extern void __show_run_stats(void);
extern int __show_running_run_stats(void);
extern void show_running_run_stats(void);
extern void check_for_running_stats(void);
extern void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src);
extern void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src);
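
/*
 * Sketch of the assumed aggregation flow (illustrative, not fio
 * source; nr and ts[] stand in for the caller's data): fold a set of
 * per-thread stats into one summary, as group reporting does.
 *
 *	struct thread_stat agg;
 *	int i;
 *
 *	init_thread_stat(&agg);
 *	for (i = 0; i < nr; i++)
 *		sum_thread_stats(&agg, &ts[i]);
 */
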
extern void init_thread_stat_min_vals(struct thread_stat *ts);
extern void init_thread_stat(struct thread_stat *ts);
extern void init_group_run_stat(struct group_run_stats *gs);
extern void eta_to_str(char *str, unsigned long eta_sec);
extern bool calc_lat(struct io_stat *is, unsigned long long *min, unsigned long long *max, double *mean, double *dev);
extern unsigned int calc_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr, fio_fp64_t *plist, unsigned long long **output, unsigned long long *maxv, unsigned long long *minv);
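
/*
 * Sketch of assumed usage (not fio source): extract the median read
 * completion latency from a thread_stat. calc_clat_percentiles() is
 * assumed to allocate *output, with the caller freeing it.
 *
 *	fio_fp64_t plist[1] = { { .u.f = 50.0 } };
 *	unsigned long long *vals = NULL, maxv, minv;
 *	unsigned int n;
 *
 *	n = calc_clat_percentiles(ts->io_u_plat[FIO_CLAT][DDIR_READ],
 *				  ts->total_io_u[DDIR_READ], plist,
 *				  &vals, &maxv, &minv);
 *	if (n)
 *		printf("p50 = %llu nsec\n", vals[0]);
 *	free(vals);
 */
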
extern void stat_calc_lat_n(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_dist(uint64_t *map, unsigned long total, double *io_u_dist);
extern void reset_io_stats(struct thread_data *);
extern void update_rusage_stat(struct thread_data *);
extern void clear_rusage_stat(struct thread_data *);

extern void add_lat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
				unsigned long long, uint64_t, unsigned int, bool);
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
				unsigned long long, uint64_t, unsigned int, bool);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
				unsigned long long, uint64_t, unsigned int);
extern void add_agg_sample(union io_sample_data, enum fio_ddir, unsigned long long);
extern void add_iops_sample(struct thread_data *, struct io_u *,
				unsigned int);
extern void add_bw_sample(struct thread_data *, struct io_u *,
				unsigned int, unsigned long long);
extern void add_sync_clat_sample(struct thread_stat *ts,
				unsigned long long nsec);
extern int calc_log_samples(void);
extern void free_clat_prio_stats(struct thread_stat *);
extern int alloc_clat_prio_stat_ddir(struct thread_stat *, enum fio_ddir, int);

extern void print_disk_util(struct disk_util_stat *, struct disk_util_agg *, int terse, struct buf_output *);
extern void json_array_add_disk_util(struct disk_util_stat *dus,
				struct disk_util_agg *agg, struct json_array *parent);

extern struct io_log *agg_io_log[DDIR_RWDIR_CNT];
extern bool write_bw_log;

static inline bool nsec_to_usec(unsigned long long *min,
				unsigned long long *max, double *mean,
				double *dev)
{
	if (*min > 2000 && *max > 99999 && *dev > 1000.0) {
		*min /= 1000;
		*max /= 1000;
		*mean /= 1000.0;
		*dev /= 1000.0;
		return true;
	}

	return false;
}

static inline bool nsec_to_msec(unsigned long long *min,
				unsigned long long *max, double *mean,
				double *dev)
{
	if (*min > 2000000 && *max > 99999999ULL && *dev > 1000000.0) {
		*min /= 1000000;
		*max /= 1000000;
		*mean /= 1000000.0;
		*dev /= 1000000.0;
		return true;
	}

	return false;
}
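
/*
 * Typical use (illustrative): with min/max/mean/dev in nsec, e.g. as
 * filled in by calc_lat(), try the coarser unit first and report
 * whichever conversion sticks.
 *
 *	const char *unit = "nsec";
 *
 *	if (nsec_to_msec(&min, &max, &mean, &dev))
 *		unit = "msec";
 *	else if (nsec_to_usec(&min, &max, &mean, &dev))
 *		unit = "usec";
 */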

/*
 * Worst level condensing would be 1:5, so allow enough room for that
 */
#define __THREAD_RUNSTR_SZ(nr)	((nr) * 5)
#define THREAD_RUNSTR_SZ	__THREAD_RUNSTR_SZ(thread_number)

uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u);