Add support for latency probing over an interval of load
[fio.git] / stat.h
#ifndef FIO_STAT_H
#define FIO_STAT_H

#include "iolog.h"
#include "lib/output_buffer.h"

struct lat_step_stats {
	uint64_t iops[DDIR_RWDIR_CNT];
	fio_fp64_t avg[DDIR_RWDIR_CNT];
};

struct group_run_stats {
	uint64_t max_run[DDIR_RWDIR_CNT], min_run[DDIR_RWDIR_CNT];
	uint64_t max_bw[DDIR_RWDIR_CNT], min_bw[DDIR_RWDIR_CNT];
	uint64_t iobytes[DDIR_RWDIR_CNT];
	uint64_t agg[DDIR_RWDIR_CNT];
	uint32_t kb_base;
	uint32_t unit_base;
	uint32_t sig_figs;
	uint32_t groupid;
	uint32_t unified_rw_rep;
} __attribute__((packed));

/*
 * How many depth levels to log
 */
#define FIO_IO_U_MAP_NR 7
#define FIO_IO_U_LAT_N_NR 10
#define FIO_IO_U_LAT_U_NR 10
#define FIO_IO_U_LAT_M_NR 12

/*
 * Constants for clat percentiles
 */
#define FIO_IO_U_PLAT_BITS 6
#define FIO_IO_U_PLAT_VAL (1 << FIO_IO_U_PLAT_BITS)
#define FIO_IO_U_PLAT_GROUP_NR 29
#define FIO_IO_U_PLAT_NR (FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
#define FIO_IO_U_LIST_MAX_LEN 20 /* The size of the default and user-specified
				    list of percentiles */

/*
 * Aggregate clat samples to report percentile(s) of them.
 *
 * EXECUTIVE SUMMARY
 *
 * FIO_IO_U_PLAT_BITS determines the maximum statistical error on the
 * value of resulting percentiles. The error will be approximately
 * 1/2^(FIO_IO_U_PLAT_BITS+1) of the value.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the maximum
 * range being tracked for latency samples. The maximum value tracked
 * accurately will be 2^(GROUP_NR + PLAT_BITS - 1) nanoseconds.
 *
 * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the memory
 * requirement of storing those aggregate counts. The memory used will
 * be (FIO_IO_U_PLAT_GROUP_NR * 2^FIO_IO_U_PLAT_BITS) * sizeof(int)
 * bytes.
 *
 * FIO_IO_U_PLAT_NR is the total number of buckets.
 *
 * DETAILS
 *
 * Suppose the clat varies from 0 to 999 (usec). The straightforward
 * method is to keep an array of (999 + 1) buckets, in which a counter
 * keeps the count of samples which fall in the bucket, e.g.,
 * {[0],[1],...,[999]}. However this consumes a huge amount of space,
 * and can be avoided if an approximation is acceptable.
 *
 * One such method is to let the range of a bucket be greater than
 * one. This method has low accuracy when the value is small. For
 * example, let the buckets be {[0,99],[100,199],...,[900,999]}, and
 * let the represented value of each bucket be the mean of its range.
 * Then a value of 0 has a round-off error of 49.5. To improve on this,
 * we use buckets with non-uniform ranges, while bounding the error of
 * each bucket within a ratio of the sample value. A simple example
 * would be when error_bound = 0.005, buckets are {
 * {[0],[1],...,[99]}, {[100,101],[102,103],...,[198,199]},..,
 * {[900,909],[910,919]...} }. The total range is partitioned into
 * groups with different ranges, then into buckets with uniform ranges.
 * An upper bound of the error is (range_of_bucket/2)/value_of_bucket.
 *
 * For better efficiency, we implement this using base two. We group
 * samples by their Most Significant Bit (MSB), extract the next M bits
 * as an index within the group, and discard the rest of the bits.
 *
 * E.g., assume a sample 'x' whose MSB is bit n (starting from bit 0),
 * and use M bits for indexing
 *
 *        | n | M bits | bit (n-M-1) ... bit 0 |
 *
 * Because x is at least 2^n, and bits 0 to (n-M-1) together are at most
 * (2^(n-M) - 1), discarding bits 0 to (n-M-1) makes the round-off
 * error
 *
 *           2^(n-M)-1    2^(n-M)     1
 *      e <= --------- <= -------  = ---
 *              2^n         2^n      2^M
 *
 * Furthermore, since we use the "mean" of the range to represent the
 * bucket, the error e can be halved to 1 / 2^(M+1). By using M bits as
 * the index, each group must contain 2^M buckets.
 *
 * E.g. Let M (FIO_IO_U_PLAT_BITS) be 6
 *      Error bound is 1/2^(6+1) = 0.0078125 (< 1%)
 *
 *	Group	MSB	#discarded	range of		#buckets
 *			error_bits	value
 *	----------------------------------------------------------------
 *	0*	0~5	0		[0,63]			64
 *	1*	6	0		[64,127]		64
 *	2	7	1		[128,255]		64
 *	3	8	2		[256,511]		64
 *	4	9	3		[512,1023]		64
 *	...	...	...		[...,...]		...
 *	28	33	27		[8589934592,+inf]**	64
 *
 * * Special cases: when n < M or when n == M, in both cases no bits
 *   are discarded and the value cannot be rounded off. Use all bits of
 *   the sample as the index.
 *
 * ** If a sample's MSB is greater than 33, it will be counted as 33.
 */

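/*
 * Illustrative sketch only: a value-to-bucket mapping that follows the
 * scheme described above. fio's real mapping lives in stat.c; the helper
 * name below is hypothetical, and __builtin_clzll assumes a GCC/Clang
 * style compiler.
 */
static inline unsigned int example_plat_val_to_idx(unsigned long long val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* position of the most significant set bit, counting from bit 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val) * 8) - __builtin_clzll(val) - 1;

	/*
	 * Samples with MSB <= FIO_IO_U_PLAT_BITS fall in the first two
	 * groups and are indexed by their full value (no rounding).
	 */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return (unsigned int) val;

	/* bits below the M index bits are discarded (the error bits) */
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* number of buckets that precede this group */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/* index within the group: the M bits just below the MSB */
	offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

	/* anything beyond the last group lands in the final bucket */
	idx = base + offset;
	if (idx > FIO_IO_U_PLAT_NR - 1)
		idx = FIO_IO_U_PLAT_NR - 1;

	return idx;
}
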
/*
 * Trim cycle count measurements
 */
#define MAX_NR_BLOCK_INFOS 8192
#define BLOCK_INFO_STATE_SHIFT 29
#define BLOCK_INFO_TRIMS(block_info)	\
	((block_info) & ((1 << BLOCK_INFO_STATE_SHIFT) - 1))
#define BLOCK_INFO_STATE(block_info)		\
	((block_info) >> BLOCK_INFO_STATE_SHIFT)
#define BLOCK_INFO(state, trim_cycles)	\
	((trim_cycles) | ((unsigned int) (state) << BLOCK_INFO_STATE_SHIFT))
#define BLOCK_INFO_SET_STATE(block_info, state)	\
	BLOCK_INFO(state, BLOCK_INFO_TRIMS(block_info))
enum block_info_state {
	BLOCK_STATE_UNINIT,
	BLOCK_STATE_TRIMMED,
	BLOCK_STATE_WRITTEN,
	BLOCK_STATE_TRIM_FAILURE,
	BLOCK_STATE_WRITE_FAILURE,
	BLOCK_STATE_COUNT,
};

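/*
 * Illustrative sketch only (the helper name is hypothetical): how the
 * BLOCK_INFO macros above pack a block state and a trim cycle count into
 * one 32-bit word - the state in the top 3 bits, the count in the low
 * BLOCK_INFO_STATE_SHIFT bits.
 */
static inline uint32_t example_block_info_pack(void)
{
	/* 3 trim cycles, currently in the trimmed state */
	uint32_t bi = BLOCK_INFO(BLOCK_STATE_TRIMMED, 3);

	/* here BLOCK_INFO_STATE(bi) == BLOCK_STATE_TRIMMED, BLOCK_INFO_TRIMS(bi) == 3 */

	/* update the state after a write, keeping the trim count intact */
	return BLOCK_INFO_SET_STATE(bi, BLOCK_STATE_WRITTEN);
}
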
#define MAX_PATTERN_SIZE 512
#define FIO_JOBNAME_SIZE 128
#define FIO_JOBDESC_SIZE 256
#define FIO_VERROR_SIZE 128

#define MAX_STEP_STATS 64

struct thread_stat {
	char name[FIO_JOBNAME_SIZE];
	char verror[FIO_VERROR_SIZE];
	uint32_t error;
	uint32_t thread_number;
	uint32_t groupid;
	uint32_t pid;
	char description[FIO_JOBDESC_SIZE];
	uint32_t members;
	uint32_t unified_rw_rep;

	/*
	 * bandwidth and latency stats
	 */
	struct io_stat sync_stat __attribute__((aligned(8))); /* fsync etc stats */
	struct io_stat clat_stat[DDIR_RWDIR_CNT]; /* completion latency */
	struct io_stat slat_stat[DDIR_RWDIR_CNT]; /* submission latency */
	struct io_stat lat_stat[DDIR_RWDIR_CNT]; /* total latency */
	struct io_stat bw_stat[DDIR_RWDIR_CNT]; /* bandwidth stats */
	struct io_stat iops_stat[DDIR_RWDIR_CNT]; /* IOPS stats */

	/*
	 * fio system usage accounting
	 */
	uint64_t usr_time;
	uint64_t sys_time;
	uint64_t ctx;
	uint64_t minf, majf;

	/*
	 * IO depth and latency stats
	 */
	uint32_t clat_percentiles;
	uint32_t lat_percentiles;
	uint64_t percentile_precision;
	fio_fp64_t percentile_list[FIO_IO_U_LIST_MAX_LEN];

	uint64_t io_u_map[FIO_IO_U_MAP_NR];
	uint64_t io_u_submit[FIO_IO_U_MAP_NR];
	uint64_t io_u_complete[FIO_IO_U_MAP_NR];
	uint64_t io_u_lat_n[FIO_IO_U_LAT_N_NR];
	uint64_t io_u_lat_u[FIO_IO_U_LAT_U_NR];
	uint64_t io_u_lat_m[FIO_IO_U_LAT_M_NR];
	uint64_t io_u_plat[DDIR_RWDIR_CNT][FIO_IO_U_PLAT_NR];
	uint64_t io_u_sync_plat[FIO_IO_U_PLAT_NR];

	uint64_t total_io_u[DDIR_RWDIR_SYNC_CNT];
	uint64_t short_io_u[DDIR_RWDIR_CNT];
	uint64_t drop_io_u[DDIR_RWDIR_CNT];
	uint64_t total_submit;
	uint64_t total_complete;

	uint64_t io_bytes[DDIR_RWDIR_CNT];
	uint64_t runtime[DDIR_RWDIR_CNT];
	uint64_t total_run_time;

	/*
	 * IO Error related stats
	 */
	union {
		uint16_t continue_on_error;
		uint32_t pad2;
	};
	uint32_t first_error;
	uint64_t total_err_count;

	/* ZBD stats */
	uint64_t nr_zone_resets;

	uint64_t nr_block_infos;
	uint32_t block_infos[MAX_NR_BLOCK_INFOS];

	uint32_t kb_base;
	uint32_t unit_base;

	uint32_t latency_depth;
	uint32_t pad3;
	uint64_t latency_target;
	fio_fp64_t latency_percentile;
	uint64_t latency_window;

	uint32_t sig_figs;
	uint32_t pad4;

	struct lat_step_stats step_stats[MAX_STEP_STATS];

	uint64_t ss_dur;
	uint32_t ss_state;
	uint32_t ss_head;

	fio_fp64_t ss_limit;
	fio_fp64_t ss_slope;
	fio_fp64_t ss_deviation;
	fio_fp64_t ss_criterion;

	union {
		uint64_t *ss_iops_data;
		uint64_t pad5;
	};

	union {
		uint64_t *ss_bw_data;
		uint64_t pad6;
	};
} __attribute__((packed));

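/*
 * Hedged sketch, not fio code: one way a consumer might walk the per-step
 * latency probe results recorded in step_stats[] above. It assumes unused
 * steps are left zero-filled; the helper name is hypothetical.
 */
static inline uint64_t example_total_step_iops(const struct thread_stat *ts,
					       enum fio_ddir ddir)
{
	uint64_t iops = 0;
	int i;

	for (i = 0; i < MAX_STEP_STATS; i++)
		iops += ts->step_stats[i].iops[ddir];

	return iops;
}
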
struct jobs_eta {
	uint32_t nr_running;
	uint32_t nr_ramp;

	uint32_t nr_pending;
	uint32_t nr_setting_up;

	uint64_t m_rate[DDIR_RWDIR_CNT], t_rate[DDIR_RWDIR_CNT];
	uint64_t rate[DDIR_RWDIR_CNT];
	uint32_t m_iops[DDIR_RWDIR_CNT], t_iops[DDIR_RWDIR_CNT];
	uint32_t iops[DDIR_RWDIR_CNT];
	uint64_t elapsed_sec;
	uint64_t eta_sec;
	uint32_t is_pow2;
	uint32_t unit_base;

	uint32_t sig_figs;

	uint32_t files_open;

	/*
	 * Network 'copy' of run_str[]
	 */
	uint32_t nr_threads;
	uint8_t run_str[];
} __attribute__((packed));

struct io_u_plat_entry {
	struct flist_head list;
	uint64_t io_u_plat[FIO_IO_U_PLAT_NR];
};

extern struct fio_sem *stat_sem;

extern struct jobs_eta *get_jobs_eta(bool force, size_t *size);

extern void stat_init(void);
extern void stat_exit(void);

extern struct json_object * show_thread_status(struct thread_stat *ts, struct group_run_stats *rs, struct flist_head *, struct buf_output *);
extern void show_group_stats(struct group_run_stats *rs, struct buf_output *);
extern bool calc_thread_status(struct jobs_eta *je, int force);
extern void display_thread_status(struct jobs_eta *je);
extern void __show_run_stats(void);
extern void __show_running_run_stats(void);
extern void show_running_run_stats(void);
extern void check_for_running_stats(void);
extern void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, bool first);
extern void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src);
extern void init_thread_stat(struct thread_stat *ts);
extern void init_group_run_stat(struct group_run_stats *gs);
extern void eta_to_str(char *str, unsigned long eta_sec);
extern bool calc_lat(struct io_stat *is, unsigned long long *min, unsigned long long *max, double *mean, double *dev);
extern unsigned int calc_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr, fio_fp64_t *plist, unsigned long long **output, unsigned long long *maxv, unsigned long long *minv);
extern void stat_calc_lat_n(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat);
extern void stat_calc_dist(uint64_t *map, unsigned long total, double *io_u_dist);
extern void reset_io_stats(struct thread_data *);
extern void update_rusage_stat(struct thread_data *);
extern void clear_rusage_stat(struct thread_data *);

extern void add_lat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
			   unsigned long long, uint64_t);
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
			    unsigned long long, uint64_t);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long,
			    unsigned long long, uint64_t);
extern void add_agg_sample(union io_sample_data, enum fio_ddir, unsigned long long);
extern void add_iops_sample(struct thread_data *, struct io_u *,
			    unsigned int);
extern void add_bw_sample(struct thread_data *, struct io_u *,
			  unsigned int, unsigned long long);
extern void add_sync_clat_sample(struct thread_stat *ts,
				 unsigned long long nsec);
extern int calc_log_samples(void);

extern struct io_log *agg_io_log[DDIR_RWDIR_CNT];
extern bool write_bw_log;

static inline bool nsec_to_usec(unsigned long long *min,
				unsigned long long *max, double *mean,
				double *dev)
{
	if (*min > 2000 && *max > 99999 && *dev > 1000.0) {
		*min /= 1000;
		*max /= 1000;
		*mean /= 1000.0;
		*dev /= 1000.0;
		return true;
	}

	return false;
}

static inline bool nsec_to_msec(unsigned long long *min,
				unsigned long long *max, double *mean,
				double *dev)
{
	if (*min > 2000000 && *max > 99999999ULL && *dev > 1000000.0) {
		*min /= 1000000;
		*max /= 1000000;
		*mean /= 1000000.0;
		*dev /= 1000000.0;
		return true;
	}

	return false;
}

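/*
 * Illustrative sketch only, not fio's API: the helpers above are meant to
 * scale a min/max/mean/dev set, e.g. as produced by calc_lat(), from
 * nanoseconds down to a coarser unit when the values are large enough. The
 * helper name is hypothetical.
 */
static inline const char *example_pick_lat_unit(unsigned long long *min,
						unsigned long long *max,
						double *mean, double *dev)
{
	if (nsec_to_msec(min, max, mean, dev))
		return "msec";
	if (nsec_to_usec(min, max, mean, dev))
		return "usec";
	return "nsec";
}
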
/*
 * Worst level condensing would be 1:5, so allow enough room for that
 */
#define __THREAD_RUNSTR_SZ(nr)	((nr) * 5)
#define THREAD_RUNSTR_SZ	__THREAD_RUNSTR_SZ(thread_number)

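/*
 * Minimal sketch under stated assumptions: struct jobs_eta ends in a
 * flexible run_str[] array, so a buffer holding it has to be sized with
 * the 1:5 condensing factor above. The helper name is hypothetical.
 */
static inline size_t example_jobs_eta_size(unsigned int nr_threads)
{
	/* fixed header plus the (possibly condensed) run_str[] payload */
	return sizeof(struct jobs_eta) + __THREAD_RUNSTR_SZ(nr_threads);
}
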
uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u);

#endif