engines/io_uring: use fixed opcodes for pre-mapped buffers
[fio.git] / stat.c
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <math.h>

#include "fio.h"
#include "diskutil.h"
#include "lib/ieee754.h"
#include "json.h"
#include "lib/getrusage.h"
#include "idletime.h"
#include "lib/pow2.h"
#include "lib/output_buffer.h"
#include "helper_thread.h"
#include "smalloc.h"
#include "zbd.h"
#include "oslib/asprintf.h"

#define LOG_MSEC_SLACK 1

struct fio_sem *stat_sem;

void clear_rusage_stat(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;

	fio_getrusage(&td->ru_start);
	ts->usr_time = ts->sys_time = 0;
	ts->ctx = 0;
	ts->minf = ts->majf = 0;
}

void update_rusage_stat(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;

	fio_getrusage(&td->ru_end);
	ts->usr_time += mtime_since_tv(&td->ru_start.ru_utime,
					&td->ru_end.ru_utime);
	ts->sys_time += mtime_since_tv(&td->ru_start.ru_stime,
					&td->ru_end.ru_stime);
	ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
			- (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
	ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
	ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;

	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
}

/*
 * Given a latency, return the index of the corresponding bucket in
 * the structure tracking percentiles.
 *
 * (1) find the group (and error bits) that the value (latency)
 * belongs to by looking at its MSB. (2) find the bucket number in the
 * group by looking at the index bits.
 */
static unsigned int plat_val_to_idx(unsigned long long val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Find MSB starting from bit 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val)*8) - __builtin_clzll(val) - 1;

	/*
	 * Values in the first two groups (msb <= FIO_IO_U_PLAT_BITS)
	 * are not rounded off: use all bits of the sample as the index.
	 */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return val;

	/* Compute the number of error bits to discard */
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* Compute the number of buckets before the group */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/*
	 * Discard the error bits and apply the mask to find the
	 * index for the buckets in the group
	 */
	offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

	/* Make sure the index does not exceed (array size - 1) */
	idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
		(base + offset) : (FIO_IO_U_PLAT_NR - 1);

	return idx;
}
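
/*
 * Worked example of the bucketing above (assuming the in-tree value
 * FIO_IO_U_PLAT_BITS == 6, i.e. FIO_IO_U_PLAT_VAL == 64): for
 * val = 1000 the MSB is bit 9, so error_bits = 9 - 6 = 3 and the
 * group starts at base = (3 + 1) << 6 = 256. Dropping the 3 error
 * bits gives offset = (1000 >> 3) & 63 = 61, so the sample lands in
 * bucket 256 + 61 = 317, a bucket that spans 2^3 = 8 consecutive
 * values ([1000, 1008)).
 */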

/*
 * Convert the given index of the bucket array to the value
 * represented by the bucket
 */
static unsigned long long plat_idx_to_val(unsigned int idx)
{
	unsigned int error_bits;
	unsigned long long k, base;

	assert(idx < FIO_IO_U_PLAT_NR);

	/*
	 * The first two groups are not rounded off: the index holds
	 * all bits of the sample, so return it directly.
	 */
	if (idx < (FIO_IO_U_PLAT_VAL << 1))
		return idx;

	/* Find the group and compute the minimum value of that group */
	error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
	base = ((unsigned long long) 1) << (error_bits + FIO_IO_U_PLAT_BITS);

	/* Find the bucket number within the group */
	k = idx % FIO_IO_U_PLAT_VAL;

	/* Return the mean of the range of the bucket */
	return base + ((k + 0.5) * (1 << error_bits));
}
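
/*
 * Continuing the example above: idx = 317 has
 * error_bits = (317 >> 6) - 1 = 3 and base = 1 << (3 + 6) = 512;
 * with k = 317 % 64 = 61 this yields 512 + 61.5 * 8 = 1004, the
 * midpoint of the [1000, 1008) range the bucket covers. The round
 * trip is lossy only by the error bits discarded on the way in.
 */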

static int double_cmp(const void *a, const void *b)
{
	const fio_fp64_t fa = *(const fio_fp64_t *) a;
	const fio_fp64_t fb = *(const fio_fp64_t *) b;
	int cmp = 0;

	if (fa.u.f > fb.u.f)
		cmp = 1;
	else if (fa.u.f < fb.u.f)
		cmp = -1;

	return cmp;
}

unsigned int calc_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr,
				   fio_fp64_t *plist, unsigned long long **output,
				   unsigned long long *maxv, unsigned long long *minv)
{
	unsigned long long sum = 0;
	unsigned int len, i, j = 0;
	unsigned long long *ovals = NULL;
	bool is_last;

	*minv = -1ULL;
	*maxv = 0;

	len = 0;
	while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
		len++;

	if (!len)
		return 0;

	/*
	 * Sort the percentile list. Note that it may already be sorted if
	 * we are using the default values, but since it's a short list this
	 * isn't a worry. Also note that this does not work for NaN values.
	 */
	if (len > 1)
		qsort((void *)plist, len, sizeof(plist[0]), double_cmp);

	ovals = malloc(len * sizeof(*ovals));
	if (!ovals)
		return 0;

	/*
	 * Calculate bucket values, note down max and min values
	 */
	is_last = false;
	for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
		sum += io_u_plat[i];
		while (sum >= ((long double) plist[j].u.f / 100.0 * nr)) {
			assert(plist[j].u.f <= 100.0);

			ovals[j] = plat_idx_to_val(i);
			if (ovals[j] < *minv)
				*minv = ovals[j];
			if (ovals[j] > *maxv)
				*maxv = ovals[j];

			is_last = (j == len - 1) != 0;
			if (is_last)
				break;

			j++;
		}
	}

	if (!is_last)
		log_err("fio: error calculating latency percentiles\n");

	*output = ovals;
	return len;
}
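
/*
 * Minimal usage sketch for the function above; the helper name is
 * hypothetical and the block is compiled out. The output array is
 * allocated by calc_clat_percentiles() and must be freed by the
 * caller.
 */
#if 0
static unsigned long long plat_median(uint64_t *io_u_plat,
				      unsigned long long nr_samples)
{
	unsigned long long *ovals, maxv, minv, median = 0;
	fio_fp64_t plist[FIO_IO_U_LIST_MAX_LEN];

	/* a single percentile entry: the median; rest zero-terminated */
	memset(plist, 0, sizeof(plist));
	plist[0].u.f = 50.0;

	if (calc_clat_percentiles(io_u_plat, nr_samples, plist, &ovals,
				  &maxv, &minv)) {
		median = ovals[0];
		free(ovals);
	}
	return median;
}
#endif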

/*
 * Find and display the p-th percentile of clat
 */
static void show_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr,
				  fio_fp64_t *plist, unsigned int precision,
				  const char *pre, struct buf_output *out)
{
	unsigned int divisor, len, i, j = 0;
	unsigned long long minv, maxv;
	unsigned long long *ovals;
	int per_line, scale_down, time_width;
	bool is_last;
	char fmt[32];

	len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
	if (!len || !ovals)
		goto out;

	/*
	 * We default to nsecs, but if the value range is such that we
	 * should scale down to usecs or msecs, do that.
	 */
	if (minv > 2000000 && maxv > 99999999ULL) {
		scale_down = 2;
		divisor = 1000000;
		log_buf(out, " %s percentiles (msec):\n |", pre);
	} else if (minv > 2000 && maxv > 99999) {
		scale_down = 1;
		divisor = 1000;
		log_buf(out, " %s percentiles (usec):\n |", pre);
	} else {
		scale_down = 0;
		divisor = 1;
		log_buf(out, " %s percentiles (nsec):\n |", pre);
	}

	time_width = max(5, (int) (log10(maxv / divisor) + 1));
	snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
			precision, time_width);
	/* fmt will be something like " %5.2fth=[%4llu]%c" */
	per_line = (80 - 7) / (precision + 10 + time_width);

	for (j = 0; j < len; j++) {
		/* for formatting */
		if (j != 0 && (j % per_line) == 0)
			log_buf(out, " |");

		/* end of the list */
		is_last = (j == len - 1) != 0;

		for (i = 0; i < scale_down; i++)
			ovals[j] = (ovals[j] + 999) / 1000;

		log_buf(out, fmt, plist[j].u.f, ovals[j], is_last ? '\n' : ',');

		if (is_last)
			break;

		if ((j % per_line) == per_line - 1) /* for formatting */
			log_buf(out, "\n");
	}

out:
	if (ovals)
		free(ovals);
}

bool calc_lat(struct io_stat *is, unsigned long long *min,
	      unsigned long long *max, double *mean, double *dev)
{
	double n = (double) is->samples;

	if (n == 0)
		return false;

	*min = is->min_val;
	*max = is->max_val;
	*mean = is->mean.u.f;

	if (n > 1.0)
		*dev = sqrt(is->S.u.f / (n - 1.0));
	else
		*dev = 0;

	return true;
}
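
/*
 * Note: the deviation reported above is the Bessel-corrected sample
 * standard deviation, sqrt(S / (n - 1)), with S the running sum of
 * squared distances from the mean. E.g. for the samples {1, 2, 3}:
 * mean = 2, S = 2, so dev = sqrt(2 / 2) = 1.
 */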

void show_group_stats(struct group_run_stats *rs, struct buf_output *out)
{
	char *io, *agg, *min, *max;
	char *ioalt, *aggalt, *minalt, *maxalt;
	const char *str[] = { " READ", " WRITE", " TRIM" };
	int i;

	log_buf(out, "\nRun status group %d (all jobs):\n", rs->groupid);

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		const int i2p = is_power_of_2(rs->kb_base);

		if (!rs->max_run[i])
			continue;

		io = num2str(rs->iobytes[i], rs->sig_figs, 1, i2p, N2S_BYTE);
		ioalt = num2str(rs->iobytes[i], rs->sig_figs, 1, !i2p, N2S_BYTE);
		agg = num2str(rs->agg[i], rs->sig_figs, 1, i2p, rs->unit_base);
		aggalt = num2str(rs->agg[i], rs->sig_figs, 1, !i2p, rs->unit_base);
		min = num2str(rs->min_bw[i], rs->sig_figs, 1, i2p, rs->unit_base);
		minalt = num2str(rs->min_bw[i], rs->sig_figs, 1, !i2p, rs->unit_base);
		max = num2str(rs->max_bw[i], rs->sig_figs, 1, i2p, rs->unit_base);
		maxalt = num2str(rs->max_bw[i], rs->sig_figs, 1, !i2p, rs->unit_base);
		log_buf(out, "%s: bw=%s (%s), %s-%s (%s-%s), io=%s (%s), run=%llu-%llumsec\n",
				rs->unified_rw_rep ? " MIXED" : str[i],
				agg, aggalt, min, max, minalt, maxalt, io, ioalt,
				(unsigned long long) rs->min_run[i],
				(unsigned long long) rs->max_run[i]);

		free(io);
		free(agg);
		free(min);
		free(max);
		free(ioalt);
		free(aggalt);
		free(minalt);
		free(maxalt);
	}
}

void stat_calc_dist(uint64_t *map, unsigned long total, double *io_u_dist)
{
	int i;

	/*
	 * Do depth distribution calculations
	 */
	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
		if (total) {
			io_u_dist[i] = (double) map[i] / (double) total;
			io_u_dist[i] *= 100.0;
			if (io_u_dist[i] < 0.1 && map[i])
				io_u_dist[i] = 0.1;
		} else
			io_u_dist[i] = 0.0;
	}
}
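
/*
 * E.g. for map = { 900, 100, 1 } and total = 1001 this yields
 * roughly { 89.9, 10.0, 0.1 }: the 0.1 floor guarantees that a
 * bucket which saw any I/O at all never prints as a flat 0.0%.
 */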

static void stat_calc_lat(struct thread_stat *ts, double *dst,
			  uint64_t *src, int nr)
{
	unsigned long total = ddir_rw_sum(ts->total_io_u);
	int i;

	/*
	 * Do latency distribution calculations
	 */
	for (i = 0; i < nr; i++) {
		if (total) {
			dst[i] = (double) src[i] / (double) total;
			dst[i] *= 100.0;
			if (dst[i] < 0.01 && src[i])
				dst[i] = 0.01;
		} else
			dst[i] = 0.0;
	}
}

/*
 * To keep the terse format unaltered, add all of the ns latency
 * buckets to the first us latency bucket
 */
static void stat_calc_lat_nu(struct thread_stat *ts, double *io_u_lat_u)
{
	unsigned long ntotal = 0, total = ddir_rw_sum(ts->total_io_u);
	int i;

	stat_calc_lat(ts, io_u_lat_u, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);

	for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
		ntotal += ts->io_u_lat_n[i];

	io_u_lat_u[0] += 100.0 * (double) ntotal / (double) total;
}

void stat_calc_lat_n(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_n, FIO_IO_U_LAT_N_NR);
}

void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
}

void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
}

static void display_lat(const char *name, unsigned long long min,
			unsigned long long max, double mean, double dev,
			struct buf_output *out)
{
	const char *base = "(nsec)";
	char *minp, *maxp;

	if (nsec_to_msec(&min, &max, &mean, &dev))
		base = "(msec)";
	else if (nsec_to_usec(&min, &max, &mean, &dev))
		base = "(usec)";

	minp = num2str(min, 6, 1, 0, N2S_NONE);
	maxp = num2str(max, 6, 1, 0, N2S_NONE);

	log_buf(out, " %s %s: min=%s, max=%s, avg=%5.02f,"
		" stdev=%5.02f\n", name, base, minp, maxp, mean, dev);

	free(minp);
	free(maxp);
}

static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
			     int ddir, struct buf_output *out)
{
	unsigned long runt;
	unsigned long long min, max, bw, iops;
	double mean, dev;
	char *io_p, *bw_p, *bw_p_alt, *iops_p, *post_st = NULL;
	int i2p;

	if (ddir_sync(ddir)) {
		if (calc_lat(&ts->sync_stat, &min, &max, &mean, &dev)) {
			log_buf(out, " %s:\n", "fsync/fdatasync/sync_file_range");
			display_lat(io_ddir_name(ddir), min, max, mean, dev, out);
			show_clat_percentiles(ts->io_u_sync_plat,
						ts->sync_stat.samples,
						ts->percentile_list,
						ts->percentile_precision,
						io_ddir_name(ddir), out);
		}
		return;
	}

	assert(ddir_rw(ddir));

	if (!ts->runtime[ddir])
		return;

	i2p = is_power_of_2(rs->kb_base);
	runt = ts->runtime[ddir];

	bw = (1000 * ts->io_bytes[ddir]) / runt;
	io_p = num2str(ts->io_bytes[ddir], ts->sig_figs, 1, i2p, N2S_BYTE);
	bw_p = num2str(bw, ts->sig_figs, 1, i2p, ts->unit_base);
	bw_p_alt = num2str(bw, ts->sig_figs, 1, !i2p, ts->unit_base);

	iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
	iops_p = num2str(iops, ts->sig_figs, 1, 0, N2S_NONE);
	if (ddir == DDIR_WRITE)
		post_st = zbd_write_status(ts);
	else if (ddir == DDIR_READ && ts->cachehit && ts->cachemiss) {
		uint64_t total;
		double hit;

		total = ts->cachehit + ts->cachemiss;
		hit = (double) ts->cachehit / (double) total;
		hit *= 100.0;
		if (asprintf(&post_st, "; Cachehit=%0.2f%%", hit) < 0)
			post_st = NULL;
	}

	log_buf(out, " %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)%s\n",
		rs->unified_rw_rep ? "mixed" : io_ddir_name(ddir),
		iops_p, bw_p, bw_p_alt, io_p,
		(unsigned long long) ts->runtime[ddir],
		post_st ? : "");

	free(post_st);
	free(io_p);
	free(bw_p);
	free(bw_p_alt);
	free(iops_p);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		display_lat("slat", min, max, mean, dev, out);
	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		display_lat("clat", min, max, mean, dev, out);
	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
		display_lat(" lat", min, max, mean, dev, out);

	if (ts->clat_percentiles || ts->lat_percentiles) {
		const char *name = ts->clat_percentiles ? "clat" : " lat";
		uint64_t samples;

		if (ts->clat_percentiles)
			samples = ts->clat_stat[ddir].samples;
		else
			samples = ts->lat_stat[ddir].samples;

		show_clat_percentiles(ts->io_u_plat[ddir],
					samples,
					ts->percentile_list,
					ts->percentile_precision, name, out);
	}
	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg = 100.0, fkb_base = (double)rs->kb_base;
		const char *bw_str;

		if ((rs->unit_base == 1) && i2p)
			bw_str = "Kibit";
		else if (rs->unit_base == 1)
			bw_str = "kbit";
		else if (i2p)
			bw_str = "KiB";
		else
			bw_str = "kB";

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		if (rs->unit_base == 1) {
			min *= 8.0;
			max *= 8.0;
			mean *= 8.0;
			dev *= 8.0;
		}

		if (mean > fkb_base * fkb_base) {
			min /= fkb_base;
			max /= fkb_base;
			mean /= fkb_base;
			dev /= fkb_base;
			bw_str = (rs->unit_base == 1 ? "Mibit" : "MiB");
		}

		log_buf(out, " bw (%5s/s): min=%5llu, max=%5llu, per=%3.2f%%, "
			"avg=%5.02f, stdev=%5.02f, samples=%" PRIu64 "\n",
			bw_str, min, max, p_of_agg, mean, dev,
			(&ts->bw_stat[ddir])->samples);
	}
	if (calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev)) {
		log_buf(out, " iops : min=%5llu, max=%5llu, "
			"avg=%5.02f, stdev=%5.02f, samples=%" PRIu64 "\n",
			min, max, mean, dev, (&ts->iops_stat[ddir])->samples);
	}
}

static bool show_lat(double *io_u_lat, int nr, const char **ranges,
		     const char *msg, struct buf_output *out)
{
	bool new_line = true, shown = false;
	int i, line = 0;

	for (i = 0; i < nr; i++) {
		if (io_u_lat[i] <= 0.0)
			continue;
		shown = true;
		if (new_line) {
			if (line)
				log_buf(out, "\n");
			log_buf(out, " lat (%s) : ", msg);
			new_line = false;
			line = 0;
		}
		if (line)
			log_buf(out, ", ");
		log_buf(out, "%s%3.2f%%", ranges[i], io_u_lat[i]);
		line++;
		if (line == 5)
			new_line = true;
	}

	if (shown)
		log_buf(out, "\n");

	return true;
}

static void show_lat_n(double *io_u_lat_n, struct buf_output *out)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", };

	show_lat(io_u_lat_n, FIO_IO_U_LAT_N_NR, ranges, "nsec", out);
}

static void show_lat_u(double *io_u_lat_u, struct buf_output *out)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", };

	show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec", out);
}

static void show_lat_m(double *io_u_lat_m, struct buf_output *out)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", "2000=",
				 ">=2000=", };

	show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec", out);
}

static void show_latencies(struct thread_stat *ts, struct buf_output *out)
{
	double io_u_lat_n[FIO_IO_U_LAT_N_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];

	stat_calc_lat_n(ts, io_u_lat_n);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	show_lat_n(io_u_lat_n, out);
	show_lat_u(io_u_lat_u, out);
	show_lat_m(io_u_lat_m, out);
}

static int block_state_category(int block_state)
{
	switch (block_state) {
	case BLOCK_STATE_UNINIT:
		return 0;
	case BLOCK_STATE_TRIMMED:
	case BLOCK_STATE_WRITTEN:
		return 1;
	case BLOCK_STATE_WRITE_FAILURE:
	case BLOCK_STATE_TRIM_FAILURE:
		return 2;
	default:
		/* Silence compile warning on some BSDs and have a return */
		assert(0);
		return -1;
	}
}

static int compare_block_infos(const void *bs1, const void *bs2)
{
	uint64_t block1 = *(uint64_t *)bs1;
	uint64_t block2 = *(uint64_t *)bs2;
	int state1 = BLOCK_INFO_STATE(block1);
	int state2 = BLOCK_INFO_STATE(block2);
	int bscat1 = block_state_category(state1);
	int bscat2 = block_state_category(state2);
	int cycles1 = BLOCK_INFO_TRIMS(block1);
	int cycles2 = BLOCK_INFO_TRIMS(block2);

	if (bscat1 < bscat2)
		return -1;
	if (bscat1 > bscat2)
		return 1;

	if (cycles1 < cycles2)
		return -1;
	if (cycles1 > cycles2)
		return 1;

	if (state1 < state2)
		return -1;
	if (state1 > state2)
		return 1;

	assert(block1 == block2);
	return 0;
}

static int calc_block_percentiles(int nr_block_infos, uint32_t *block_infos,
				  fio_fp64_t *plist, unsigned int **percentiles,
				  unsigned int *types)
{
	int len = 0;
	int i, nr_uninit;

	qsort(block_infos, nr_block_infos, sizeof(uint32_t), compare_block_infos);

	while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
		len++;

	if (!len)
		return 0;

	/*
	 * Sort the percentile list. Note that it may already be sorted if
	 * we are using the default values, but since it's a short list this
	 * isn't a worry. Also note that this does not work for NaN values.
	 */
	if (len > 1)
		qsort((void *)plist, len, sizeof(plist[0]), double_cmp);

	/* Start only after the uninit entries end */
	for (nr_uninit = 0;
	     nr_uninit < nr_block_infos
		&& BLOCK_INFO_STATE(block_infos[nr_uninit]) == BLOCK_STATE_UNINIT;
	     nr_uninit++)
		;

	if (nr_uninit == nr_block_infos)
		return 0;

	*percentiles = calloc(len, sizeof(**percentiles));

	for (i = 0; i < len; i++) {
		int idx = (plist[i].u.f * (nr_block_infos - nr_uninit) / 100)
				+ nr_uninit;
		(*percentiles)[i] = BLOCK_INFO_TRIMS(block_infos[idx]);
	}

	memset(types, 0, sizeof(*types) * BLOCK_STATE_COUNT);
	for (i = 0; i < nr_block_infos; i++)
		types[BLOCK_INFO_STATE(block_infos[i])]++;

	return len;
}

static const char *block_state_names[] = {
	[BLOCK_STATE_UNINIT] = "unwritten",
	[BLOCK_STATE_TRIMMED] = "trimmed",
	[BLOCK_STATE_WRITTEN] = "written",
	[BLOCK_STATE_TRIM_FAILURE] = "trim failure",
	[BLOCK_STATE_WRITE_FAILURE] = "write failure",
};

static void show_block_infos(int nr_block_infos, uint32_t *block_infos,
			     fio_fp64_t *plist, struct buf_output *out)
{
	int len, pos, i;
	unsigned int *percentiles = NULL;
	unsigned int block_state_counts[BLOCK_STATE_COUNT];

	len = calc_block_percentiles(nr_block_infos, block_infos, plist,
				     &percentiles, block_state_counts);

	log_buf(out, " block lifetime percentiles :\n |");
	pos = 0;
	for (i = 0; i < len; i++) {
		uint32_t block_info = percentiles[i];
#define LINE_LENGTH	75
		char str[LINE_LENGTH];
		int strln = snprintf(str, LINE_LENGTH, " %3.2fth=%u%c",
				     plist[i].u.f, block_info,
				     i == len - 1 ? '\n' : ',');
		assert(strln < LINE_LENGTH);
		if (pos + strln > LINE_LENGTH) {
			pos = 0;
			log_buf(out, "\n |");
		}
		log_buf(out, "%s", str);
		pos += strln;
#undef LINE_LENGTH
	}
	if (percentiles)
		free(percentiles);

	log_buf(out, " states :");
	for (i = 0; i < BLOCK_STATE_COUNT; i++)
		log_buf(out, " %s=%u%c",
			block_state_names[i], block_state_counts[i],
			i == BLOCK_STATE_COUNT - 1 ? '\n' : ',');
}

static void show_ss_normal(struct thread_stat *ts, struct buf_output *out)
{
	char *p1, *p1alt, *p2;
	unsigned long long bw_mean, iops_mean;
	const int i2p = is_power_of_2(ts->kb_base);

	if (!ts->ss_dur)
		return;

	bw_mean = steadystate_bw_mean(ts);
	iops_mean = steadystate_iops_mean(ts);

	p1 = num2str(bw_mean / ts->kb_base, ts->sig_figs, ts->kb_base, i2p, ts->unit_base);
	p1alt = num2str(bw_mean / ts->kb_base, ts->sig_figs, ts->kb_base, !i2p, ts->unit_base);
	p2 = num2str(iops_mean, ts->sig_figs, 1, 0, N2S_NONE);

	log_buf(out, " steadystate : attained=%s, bw=%s (%s), iops=%s, %s%s=%.3f%s\n",
		ts->ss_state & FIO_SS_ATTAINED ? "yes" : "no",
		p1, p1alt, p2,
		ts->ss_state & FIO_SS_IOPS ? "iops" : "bw",
		ts->ss_state & FIO_SS_SLOPE ? " slope" : " mean dev",
		ts->ss_criterion.u.f,
		ts->ss_state & FIO_SS_PCT ? "%" : "");

	free(p1);
	free(p1alt);
	free(p2);
}

static void show_thread_status_normal(struct thread_stat *ts,
				      struct group_run_stats *rs,
				      struct buf_output *out)
{
	double usr_cpu, sys_cpu;
	unsigned long runtime;
	double io_u_dist[FIO_IO_U_MAP_NR];
	time_t time_p;
	char time_buf[32];

	if (!ddir_rw_sum(ts->io_bytes) && !ddir_rw_sum(ts->total_io_u))
		return;

	memset(time_buf, 0, sizeof(time_buf));

	time(&time_p);
	os_ctime_r((const time_t *) &time_p, time_buf, sizeof(time_buf));

	if (!ts->error) {
		log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d: pid=%d: %s",
			ts->name, ts->groupid, ts->members,
			ts->error, (int) ts->pid, time_buf);
	} else {
		log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d: %s",
			ts->name, ts->groupid, ts->members,
			ts->error, ts->verror, (int) ts->pid,
			time_buf);
	}

	if (strlen(ts->description))
		log_buf(out, " Description : [%s]\n", ts->description);

	if (ts->io_bytes[DDIR_READ])
		show_ddir_status(rs, ts, DDIR_READ, out);
	if (ts->io_bytes[DDIR_WRITE])
		show_ddir_status(rs, ts, DDIR_WRITE, out);
	if (ts->io_bytes[DDIR_TRIM])
		show_ddir_status(rs, ts, DDIR_TRIM, out);

	show_latencies(ts, out);

	if (ts->sync_stat.samples)
		show_ddir_status(rs, ts, DDIR_SYNC, out);

	runtime = ts->total_run_time;
	if (runtime) {
		double runt = (double) runtime;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_buf(out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%llu,"
		" majf=%llu, minf=%llu\n", usr_cpu, sys_cpu,
		(unsigned long long) ts->ctx,
		(unsigned long long) ts->majf,
		(unsigned long long) ts->minf);

	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	log_buf(out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
		" 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
		io_u_dist[1], io_u_dist[2],
		io_u_dist[3], io_u_dist[4],
		io_u_dist[5], io_u_dist[6]);

	stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
	log_buf(out, " submit : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		" 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
		io_u_dist[1], io_u_dist[2],
		io_u_dist[3], io_u_dist[4],
		io_u_dist[5], io_u_dist[6]);
	stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
	log_buf(out, " complete : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		" 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
		io_u_dist[1], io_u_dist[2],
		io_u_dist[3], io_u_dist[4],
		io_u_dist[5], io_u_dist[6]);
	log_buf(out, " issued rwts: total=%llu,%llu,%llu,%llu"
		" short=%llu,%llu,%llu,0"
		" dropped=%llu,%llu,%llu,0\n",
		(unsigned long long) ts->total_io_u[0],
		(unsigned long long) ts->total_io_u[1],
		(unsigned long long) ts->total_io_u[2],
		(unsigned long long) ts->total_io_u[3],
		(unsigned long long) ts->short_io_u[0],
		(unsigned long long) ts->short_io_u[1],
		(unsigned long long) ts->short_io_u[2],
		(unsigned long long) ts->drop_io_u[0],
		(unsigned long long) ts->drop_io_u[1],
		(unsigned long long) ts->drop_io_u[2]);
	if (ts->continue_on_error) {
		log_buf(out, " errors : total=%llu, first_error=%d/<%s>\n",
			(unsigned long long)ts->total_err_count,
			ts->first_error,
			strerror(ts->first_error));
	}
	if (ts->latency_depth) {
		log_buf(out, " latency : target=%llu, window=%llu, percentile=%.2f%%, depth=%u\n",
			(unsigned long long)ts->latency_target,
			(unsigned long long)ts->latency_window,
			ts->latency_percentile.u.f,
			ts->latency_depth);
	}

	if (ts->nr_block_infos)
		show_block_infos(ts->nr_block_infos, ts->block_infos,
				 ts->percentile_list, out);

	if (ts->ss_dur)
		show_ss_normal(ts, out);
}

static void show_ddir_status_terse(struct thread_stat *ts,
				   struct group_run_stats *rs, int ddir,
				   int ver, struct buf_output *out)
{
	unsigned long long min, max, minv, maxv, bw, iops;
	unsigned long long *ovals = NULL;
	double mean, dev;
	unsigned int len;
	int i, bw_stat;

	assert(ddir_rw(ddir));

	iops = bw = 0;
	if (ts->runtime[ddir]) {
		uint64_t runt = ts->runtime[ddir];

		bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; /* KiB/s */
		iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
	}

	log_buf(out, ";%llu;%llu;%llu;%llu",
		(unsigned long long) ts->io_bytes[ddir] >> 10, bw, iops,
		(unsigned long long) ts->runtime[ddir]);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
	else
		log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
	else
		log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);

	if (ts->clat_percentiles || ts->lat_percentiles) {
		len = calc_clat_percentiles(ts->io_u_plat[ddir],
					    ts->clat_stat[ddir].samples,
					    ts->percentile_list, &ovals, &maxv,
					    &minv);
	} else
		len = 0;

	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
		if (i >= len) {
			log_buf(out, ";0%%=0");
			continue;
		}
		log_buf(out, ";%f%%=%llu", ts->percentile_list[i].u.f, ovals[i]/1000);
	}

	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
		log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
	else
		log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);

	if (ovals)
		free(ovals);

	bw_stat = calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev);
	if (bw_stat) {
		double p_of_agg = 100.0;

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		log_buf(out, ";%llu;%llu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
	} else
		log_buf(out, ";%llu;%llu;%f%%;%f;%f", 0ULL, 0ULL, 0.0, 0.0, 0.0);

	if (ver == 5) {
		if (bw_stat)
			log_buf(out, ";%" PRIu64, (&ts->bw_stat[ddir])->samples);
		else
			log_buf(out, ";%lu", 0UL);

		if (calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev))
			log_buf(out, ";%llu;%llu;%f;%f;%" PRIu64, min, max,
				mean, dev, (&ts->iops_stat[ddir])->samples);
		else
			log_buf(out, ";%llu;%llu;%f;%f;%lu", 0ULL, 0ULL, 0.0, 0.0, 0UL);
	}
}

static void add_ddir_status_json(struct thread_stat *ts,
		struct group_run_stats *rs, int ddir, struct json_object *parent)
{
	unsigned long long min, max, minv, maxv;
	unsigned long long bw_bytes, bw;
	unsigned long long *ovals = NULL;
	double mean, dev, iops;
	unsigned int len;
	int i;
	struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object = NULL;
	char buf[120];
	double p_of_agg = 100.0;

	assert(ddir_rw(ddir) || ddir_sync(ddir));

	if (ts->unified_rw_rep && ddir != DDIR_READ)
		return;

	dir_object = json_create_object();
	json_object_add_value_object(parent,
		ts->unified_rw_rep ? "mixed" : io_ddir_name(ddir), dir_object);

	if (ddir_rw(ddir)) {
		bw_bytes = 0;
		bw = 0;
		iops = 0.0;
		if (ts->runtime[ddir]) {
			uint64_t runt = ts->runtime[ddir];

			bw_bytes = ((1000 * ts->io_bytes[ddir]) / runt); /* Bytes/s */
			bw = bw_bytes / 1024; /* KiB/s */
			iops = (1000.0 * (uint64_t) ts->total_io_u[ddir]) / runt;
		}

		json_object_add_value_int(dir_object, "io_bytes", ts->io_bytes[ddir]);
		json_object_add_value_int(dir_object, "io_kbytes", ts->io_bytes[ddir] >> 10);
		json_object_add_value_int(dir_object, "bw_bytes", bw_bytes);
		json_object_add_value_int(dir_object, "bw", bw);
		json_object_add_value_float(dir_object, "iops", iops);
		json_object_add_value_int(dir_object, "runtime", ts->runtime[ddir]);
		json_object_add_value_int(dir_object, "total_ios", ts->total_io_u[ddir]);
		json_object_add_value_int(dir_object, "short_ios", ts->short_io_u[ddir]);
		json_object_add_value_int(dir_object, "drop_ios", ts->drop_io_u[ddir]);

		if (!calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
			min = max = 0;
			mean = dev = 0.0;
		}
		tmp_object = json_create_object();
		json_object_add_value_object(dir_object, "slat_ns", tmp_object);
		json_object_add_value_int(tmp_object, "min", min);
		json_object_add_value_int(tmp_object, "max", max);
		json_object_add_value_float(tmp_object, "mean", mean);
		json_object_add_value_float(tmp_object, "stddev", dev);

		if (!calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
			min = max = 0;
			mean = dev = 0.0;
		}
		tmp_object = json_create_object();
		json_object_add_value_object(dir_object, "clat_ns", tmp_object);
		json_object_add_value_int(tmp_object, "min", min);
		json_object_add_value_int(tmp_object, "max", max);
		json_object_add_value_float(tmp_object, "mean", mean);
		json_object_add_value_float(tmp_object, "stddev", dev);
	} else {
		if (!calc_lat(&ts->sync_stat, &min, &max, &mean, &dev)) {
			min = max = 0;
			mean = dev = 0.0;
		}

		tmp_object = json_create_object();
		json_object_add_value_object(dir_object, "lat_ns", tmp_object);
		json_object_add_value_int(dir_object, "total_ios", ts->total_io_u[DDIR_SYNC]);
		json_object_add_value_int(tmp_object, "min", min);
		json_object_add_value_int(tmp_object, "max", max);
		json_object_add_value_float(tmp_object, "mean", mean);
		json_object_add_value_float(tmp_object, "stddev", dev);
	}

	if (ts->clat_percentiles || ts->lat_percentiles) {
		if (ddir_rw(ddir)) {
			uint64_t samples;

			if (ts->clat_percentiles)
				samples = ts->clat_stat[ddir].samples;
			else
				samples = ts->lat_stat[ddir].samples;

			len = calc_clat_percentiles(ts->io_u_plat[ddir],
					samples, ts->percentile_list, &ovals,
					&maxv, &minv);
		} else {
			len = calc_clat_percentiles(ts->io_u_sync_plat,
					ts->sync_stat.samples,
					ts->percentile_list, &ovals, &maxv,
					&minv);
		}

		if (len > FIO_IO_U_LIST_MAX_LEN)
			len = FIO_IO_U_LIST_MAX_LEN;
	} else
		len = 0;

	percentile_object = json_create_object();
	if (ts->clat_percentiles)
		json_object_add_value_object(tmp_object, "percentile", percentile_object);
	for (i = 0; i < len; i++) {
		snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f);
		json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]);
	}

	if (output_format & FIO_OUTPUT_JSON_PLUS) {
		clat_bins_object = json_create_object();
		if (ts->clat_percentiles)
			json_object_add_value_object(tmp_object, "bins", clat_bins_object);

		for (i = 0; i < FIO_IO_U_PLAT_NR; i++) {
			if (ddir_rw(ddir)) {
				if (ts->io_u_plat[ddir][i]) {
					snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
					json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_plat[ddir][i]);
				}
			} else {
				if (ts->io_u_sync_plat[i]) {
					snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
					json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_sync_plat[i]);
				}
			}
		}
	}

	if (!ddir_rw(ddir)) {
		/* Don't leak the percentile values on the sync-only path */
		free(ovals);
		return;
	}

	if (!calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "lat_ns", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);
	if (ts->lat_percentiles)
		json_object_add_value_object(tmp_object, "percentile", percentile_object);
	if (output_format & FIO_OUTPUT_JSON_PLUS && ts->lat_percentiles)
		json_object_add_value_object(tmp_object, "bins", clat_bins_object);

	if (ovals)
		free(ovals);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}
	} else {
		min = max = 0;
		p_of_agg = mean = dev = 0.0;
	}
	json_object_add_value_int(dir_object, "bw_min", min);
	json_object_add_value_int(dir_object, "bw_max", max);
	json_object_add_value_float(dir_object, "bw_agg", p_of_agg);
	json_object_add_value_float(dir_object, "bw_mean", mean);
	json_object_add_value_float(dir_object, "bw_dev", dev);
	json_object_add_value_int(dir_object, "bw_samples",
				(&ts->bw_stat[ddir])->samples);

	if (!calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	json_object_add_value_int(dir_object, "iops_min", min);
	json_object_add_value_int(dir_object, "iops_max", max);
	json_object_add_value_float(dir_object, "iops_mean", mean);
	json_object_add_value_float(dir_object, "iops_stddev", dev);
	json_object_add_value_int(dir_object, "iops_samples",
				(&ts->iops_stat[ddir])->samples);

	if (ts->cachehit + ts->cachemiss) {
		uint64_t total;
		double hit;

		total = ts->cachehit + ts->cachemiss;
		hit = (double) ts->cachehit / (double) total;
		hit *= 100.0;
		json_object_add_value_float(dir_object, "cachehit", hit);
	}
}

static void show_thread_status_terse_all(struct thread_stat *ts,
					 struct group_run_stats *rs, int ver,
					 struct buf_output *out)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	/* General Info */
	if (ver == 2)
		log_buf(out, "2;%s;%d;%d", ts->name, ts->groupid, ts->error);
	else
		log_buf(out, "%d;%s;%s;%d;%d", ver, fio_version_string,
			ts->name, ts->groupid, ts->error);

	/* Log Read Status */
	show_ddir_status_terse(ts, rs, DDIR_READ, ver, out);
	/* Log Write Status */
	show_ddir_status_terse(ts, rs, DDIR_WRITE, ver, out);
	/* Log Trim Status */
	if (ver == 2 || ver == 4 || ver == 5)
		show_ddir_status_terse(ts, rs, DDIR_TRIM, ver, out);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_buf(out, ";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
		(unsigned long long) ts->ctx,
		(unsigned long long) ts->majf,
		(unsigned long long) ts->minf);

	/* Calc % distribution of IO depths, usec and msec latency */
	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	stat_calc_lat_nu(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Only show the fixed 7 I/O depth levels */
	log_buf(out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
		io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
		io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		log_buf(out, ";%3.2f%%", io_u_lat_u[i]);
	/* Millisecond latency */
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		log_buf(out, ";%3.2f%%", io_u_lat_m[i]);

	/* disk util stats, if any */
	if (ver >= 3 && is_running_backend())
		show_disk_util(1, NULL, out);

	/* Additional output if continue_on_error is set - default off */
	if (ts->continue_on_error)
		log_buf(out, ";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);

	/* Additional output if description is set */
	if (strlen(ts->description)) {
		if (ver == 2)
			log_buf(out, "\n");
		log_buf(out, ";%s", ts->description);
	}

	log_buf(out, "\n");
}

static void json_add_job_opts(struct json_object *root, const char *name,
			      struct flist_head *opt_list)
{
	struct json_object *dir_object;
	struct flist_head *entry;
	struct print_option *p;

	if (flist_empty(opt_list))
		return;

	dir_object = json_create_object();
	json_object_add_value_object(root, name, dir_object);

	flist_for_each(entry, opt_list) {
		const char *pos = "";

		p = flist_entry(entry, struct print_option, list);
		if (p->value)
			pos = p->value;
		json_object_add_value_string(dir_object, p->name, pos);
	}
}

static struct json_object *show_thread_status_json(struct thread_stat *ts,
						   struct group_run_stats *rs,
						   struct flist_head *opt_list)
{
	struct json_object *root, *tmp;
	struct jobs_eta *je;
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_n[FIO_IO_U_LAT_N_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;
	size_t size;

	root = json_create_object();
	json_object_add_value_string(root, "jobname", ts->name);
	json_object_add_value_int(root, "groupid", ts->groupid);
	json_object_add_value_int(root, "error", ts->error);

	/* ETA Info */
	je = get_jobs_eta(true, &size);
	if (je) {
		json_object_add_value_int(root, "eta", je->eta_sec);
		json_object_add_value_int(root, "elapsed", je->elapsed_sec);
	}

	if (opt_list)
		json_add_job_opts(root, "job options", opt_list);

	add_ddir_status_json(ts, rs, DDIR_READ, root);
	add_ddir_status_json(ts, rs, DDIR_WRITE, root);
	add_ddir_status_json(ts, rs, DDIR_TRIM, root);
	add_ddir_status_json(ts, rs, DDIR_SYNC, root);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}
	json_object_add_value_int(root, "job_runtime", ts->total_run_time);
	json_object_add_value_float(root, "usr_cpu", usr_cpu);
	json_object_add_value_float(root, "sys_cpu", sys_cpu);
	json_object_add_value_int(root, "ctx", ts->ctx);
	json_object_add_value_int(root, "majf", ts->majf);
	json_object_add_value_int(root, "minf", ts->minf);

	/* Calc % distribution of IO depths */
	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	tmp = json_create_object();
	json_object_add_value_object(root, "iodepth_level", tmp);
	/* Only show the fixed 7 I/O depth levels */
	for (i = 0; i < 7; i++) {
		char name[20];
		if (i < 6)
			snprintf(name, 20, "%d", 1 << i);
		else
			snprintf(name, 20, ">=%d", 1 << i);
		json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
	}

	/* Calc % distribution of submit IO depths */
	stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
	tmp = json_create_object();
	json_object_add_value_object(root, "iodepth_submit", tmp);
	/* Only show the fixed 7 I/O depth levels */
	for (i = 0; i < 7; i++) {
		char name[20];
		if (i == 0)
			snprintf(name, 20, "0");
		else if (i < 6)
			snprintf(name, 20, "%d", 1 << (i+1));
		else
			snprintf(name, 20, ">=%d", 1 << i);
		json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
	}

	/* Calc % distribution of completion IO depths */
	stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
	tmp = json_create_object();
	json_object_add_value_object(root, "iodepth_complete", tmp);
	/* Only show the fixed 7 I/O depth levels */
	for (i = 0; i < 7; i++) {
		char name[20];
		if (i == 0)
			snprintf(name, 20, "0");
		else if (i < 6)
			snprintf(name, 20, "%d", 1 << (i+1));
		else
			snprintf(name, 20, ">=%d", 1 << i);
		json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
	}

	/* Calc % distribution of nsec, usec and msec latency */
	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	stat_calc_lat_n(ts, io_u_lat_n);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Nanosecond latency */
	tmp = json_create_object();
	json_object_add_value_object(root, "latency_ns", tmp);
	for (i = 0; i < FIO_IO_U_LAT_N_NR; i++) {
		const char *ranges[] = { "2", "4", "10", "20", "50", "100",
					 "250", "500", "750", "1000", };
		json_object_add_value_float(tmp, ranges[i], io_u_lat_n[i]);
	}
	/* Microsecond latency */
	tmp = json_create_object();
	json_object_add_value_object(root, "latency_us", tmp);
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
		const char *ranges[] = { "2", "4", "10", "20", "50", "100",
					 "250", "500", "750", "1000", };
		json_object_add_value_float(tmp, ranges[i], io_u_lat_u[i]);
	}
	/* Millisecond latency */
	tmp = json_create_object();
	json_object_add_value_object(root, "latency_ms", tmp);
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++) {
		const char *ranges[] = { "2", "4", "10", "20", "50", "100",
					 "250", "500", "750", "1000", "2000",
					 ">=2000", };
		json_object_add_value_float(tmp, ranges[i], io_u_lat_m[i]);
	}

	/* Additional output if continue_on_error is set - default off */
	if (ts->continue_on_error) {
		json_object_add_value_int(root, "total_err", ts->total_err_count);
		json_object_add_value_int(root, "first_error", ts->first_error);
	}

	if (ts->latency_depth) {
		json_object_add_value_int(root, "latency_depth", ts->latency_depth);
		json_object_add_value_int(root, "latency_target", ts->latency_target);
		json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
		json_object_add_value_int(root, "latency_window", ts->latency_window);
	}

	/* Additional output if description is set */
	if (strlen(ts->description))
		json_object_add_value_string(root, "desc", ts->description);

	if (ts->nr_block_infos) {
		/* Block error histogram and types */
		int len;
		unsigned int *percentiles = NULL;
		unsigned int block_state_counts[BLOCK_STATE_COUNT];

		len = calc_block_percentiles(ts->nr_block_infos, ts->block_infos,
					     ts->percentile_list,
					     &percentiles, block_state_counts);

		if (len) {
			struct json_object *block, *percentile_object, *states;
			int state;
			block = json_create_object();
			json_object_add_value_object(root, "block", block);

			percentile_object = json_create_object();
			json_object_add_value_object(block, "percentiles",
						     percentile_object);
			for (i = 0; i < len; i++) {
				char buf[20];
				snprintf(buf, sizeof(buf), "%f",
					 ts->percentile_list[i].u.f);
				json_object_add_value_int(percentile_object,
							  (const char *)buf,
							  percentiles[i]);
			}

			states = json_create_object();
			json_object_add_value_object(block, "states", states);
			for (state = 0; state < BLOCK_STATE_COUNT; state++) {
				json_object_add_value_int(states,
					block_state_names[state],
					block_state_counts[state]);
			}
			free(percentiles);
		}
	}

	if (ts->ss_dur) {
		struct json_object *data;
		struct json_array *iops, *bw;
		int j, k, l;
		char ss_buf[64];

		snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
			 ts->ss_state & FIO_SS_IOPS ? "iops" : "bw",
			 ts->ss_state & FIO_SS_SLOPE ? "_slope" : "",
			 (float) ts->ss_limit.u.f,
			 ts->ss_state & FIO_SS_PCT ? "%" : "");

		tmp = json_create_object();
		json_object_add_value_object(root, "steadystate", tmp);
		json_object_add_value_string(tmp, "ss", ss_buf);
		json_object_add_value_int(tmp, "duration", (int)ts->ss_dur);
		json_object_add_value_int(tmp, "attained", (ts->ss_state & FIO_SS_ATTAINED) > 0);

		snprintf(ss_buf, sizeof(ss_buf), "%f%s", (float) ts->ss_criterion.u.f,
			 ts->ss_state & FIO_SS_PCT ? "%" : "");
		json_object_add_value_string(tmp, "criterion", ss_buf);
		json_object_add_value_float(tmp, "max_deviation", ts->ss_deviation.u.f);
		json_object_add_value_float(tmp, "slope", ts->ss_slope.u.f);

		data = json_create_object();
		json_object_add_value_object(tmp, "data", data);
		bw = json_create_array();
		iops = json_create_array();

		/*
		 * If ss was attained or the buffer is not full,
		 * ss->head points to the first element in the list.
		 * Otherwise it actually points to the second element
		 * in the list.
		 */
		if ((ts->ss_state & FIO_SS_ATTAINED) || !(ts->ss_state & FIO_SS_BUFFER_FULL))
			j = ts->ss_head;
		else
			j = ts->ss_head == 0 ? ts->ss_dur - 1 : ts->ss_head - 1;
		for (l = 0; l < ts->ss_dur; l++) {
			k = (j + l) % ts->ss_dur;
			json_array_add_value_int(bw, ts->ss_bw_data[k]);
			json_array_add_value_int(iops, ts->ss_iops_data[k]);
		}
		json_object_add_value_int(data, "bw_mean", steadystate_bw_mean(ts));
		json_object_add_value_int(data, "iops_mean", steadystate_iops_mean(ts));
		json_object_add_value_array(data, "iops", iops);
		json_object_add_value_array(data, "bw", bw);
	}

	return root;
}

static void show_thread_status_terse(struct thread_stat *ts,
				     struct group_run_stats *rs,
				     struct buf_output *out)
{
	if (terse_version >= 2 && terse_version <= 5)
		show_thread_status_terse_all(ts, rs, terse_version, out);
	else
		log_err("fio: bad terse version!? %d\n", terse_version);
}

struct json_object *show_thread_status(struct thread_stat *ts,
				       struct group_run_stats *rs,
				       struct flist_head *opt_list,
				       struct buf_output *out)
{
	struct json_object *ret = NULL;

	if (output_format & FIO_OUTPUT_TERSE)
		show_thread_status_terse(ts, rs, out);
	if (output_format & FIO_OUTPUT_JSON)
		ret = show_thread_status_json(ts, rs, opt_list);
	if (output_format & FIO_OUTPUT_NORMAL)
		show_thread_status_normal(ts, rs, out);

	return ret;
}

static void __sum_stat(struct io_stat *dst, struct io_stat *src, bool first)
{
	double mean, S;

	dst->min_val = min(dst->min_val, src->min_val);
	dst->max_val = max(dst->max_val, src->max_val);

	/*
	 * Compute new mean and S after the merge
	 * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
	 *  #Parallel_algorithm>
	 */
	if (first) {
		mean = src->mean.u.f;
		S = src->S.u.f;
	} else {
		double delta = src->mean.u.f - dst->mean.u.f;

		mean = ((src->mean.u.f * src->samples) +
			(dst->mean.u.f * dst->samples)) /
			(dst->samples + src->samples);

		S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
			(dst->samples * src->samples) /
			(dst->samples + src->samples);
	}

	dst->samples += src->samples;
	dst->mean.u.f = mean;
	dst->S.u.f = S;
}
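
/*
 * Quick numeric check of the merge above: samples {1, 2} have
 * mean 1.5, S = 0.5 and samples {3, 4} have mean 3.5, S = 0.5.
 * With delta = 2 the merged mean is (1.5*2 + 3.5*2) / 4 = 2.5 and
 * S = 0.5 + 0.5 + 2^2 * (2*2)/4 = 5, matching the direct sum of
 * squared deviations over {1, 2, 3, 4}.
 */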

/*
 * We sum two kinds of stats - one that is time based, in which case we
 * apply the proper summing technique, and then one that is iops/bw
 * numbers. For group_reporting, we should just add those up, not make
 * them the mean of everything.
 */
static void sum_stat(struct io_stat *dst, struct io_stat *src, bool first,
		     bool pure_sum)
{
	if (src->samples == 0)
		return;

	if (!pure_sum) {
		__sum_stat(dst, src, first);
		return;
	}

	if (first) {
		dst->min_val = src->min_val;
		dst->max_val = src->max_val;
		dst->samples = src->samples;
		dst->mean.u.f = src->mean.u.f;
		dst->S.u.f = src->S.u.f;
	} else {
		dst->min_val += src->min_val;
		dst->max_val += src->max_val;
		dst->samples += src->samples;
		dst->mean.u.f += src->mean.u.f;
		dst->S.u.f += src->S.u.f;
	}
}
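
/*
 * E.g. with group_reporting, two jobs whose mean IOPS are 100 and
 * 200 should report a group mean of 300 (the aggregate rate), so
 * iops/bw stats take the pure_sum path above; latency stats instead
 * merge to the mean/variance of the combined sample set.
 */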

void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (dst->max_run[i] < src->max_run[i])
			dst->max_run[i] = src->max_run[i];
		if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
			dst->min_run[i] = src->min_run[i];
		if (dst->max_bw[i] < src->max_bw[i])
			dst->max_bw[i] = src->max_bw[i];
		if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
			dst->min_bw[i] = src->min_bw[i];

		dst->iobytes[i] += src->iobytes[i];
		dst->agg[i] += src->agg[i];
	}

	if (!dst->kb_base)
		dst->kb_base = src->kb_base;
	if (!dst->unit_base)
		dst->unit_base = src->unit_base;
	if (!dst->sig_figs)
		dst->sig_figs = src->sig_figs;
}

void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src,
		      bool first)
{
	int l, k;

	for (l = 0; l < DDIR_RWDIR_CNT; l++) {
		if (!dst->unified_rw_rep) {
			sum_stat(&dst->clat_stat[l], &src->clat_stat[l], first, false);
			sum_stat(&dst->slat_stat[l], &src->slat_stat[l], first, false);
			sum_stat(&dst->lat_stat[l], &src->lat_stat[l], first, false);
			sum_stat(&dst->bw_stat[l], &src->bw_stat[l], first, true);
			sum_stat(&dst->iops_stat[l], &src->iops_stat[l], first, true);

			dst->io_bytes[l] += src->io_bytes[l];

			if (dst->runtime[l] < src->runtime[l])
				dst->runtime[l] = src->runtime[l];
		} else {
			sum_stat(&dst->clat_stat[0], &src->clat_stat[l], first, false);
			sum_stat(&dst->slat_stat[0], &src->slat_stat[l], first, false);
			sum_stat(&dst->lat_stat[0], &src->lat_stat[l], first, false);
			sum_stat(&dst->bw_stat[0], &src->bw_stat[l], first, true);
			sum_stat(&dst->iops_stat[0], &src->iops_stat[l], first, true);

			dst->io_bytes[0] += src->io_bytes[l];

			if (dst->runtime[0] < src->runtime[l])
				dst->runtime[0] = src->runtime[l];

			/*
			 * We're summing to the same destination, so override
			 * 'first' after the first iteration of the loop
			 */
			first = false;
		}
	}

	sum_stat(&dst->sync_stat, &src->sync_stat, first, false);
	dst->usr_time += src->usr_time;
	dst->sys_time += src->sys_time;
	dst->ctx += src->ctx;
	dst->majf += src->majf;
	dst->minf += src->minf;

	for (k = 0; k < FIO_IO_U_MAP_NR; k++) {
		dst->io_u_map[k] += src->io_u_map[k];
		dst->io_u_submit[k] += src->io_u_submit[k];
		dst->io_u_complete[k] += src->io_u_complete[k];
	}

	for (k = 0; k < FIO_IO_U_LAT_N_NR; k++)
		dst->io_u_lat_n[k] += src->io_u_lat_n[k];
	for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
		dst->io_u_lat_u[k] += src->io_u_lat_u[k];
	for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
		dst->io_u_lat_m[k] += src->io_u_lat_m[k];

	for (k = 0; k < FIO_IO_U_PLAT_NR; k++)
		dst->io_u_sync_plat[k] += src->io_u_sync_plat[k];

	for (k = 0; k < DDIR_RWDIR_CNT; k++) {
		if (!dst->unified_rw_rep) {
			dst->total_io_u[k] += src->total_io_u[k];
			dst->short_io_u[k] += src->short_io_u[k];
			dst->drop_io_u[k] += src->drop_io_u[k];
		} else {
			dst->total_io_u[0] += src->total_io_u[k];
			dst->short_io_u[0] += src->short_io_u[k];
			dst->drop_io_u[0] += src->drop_io_u[k];
		}
	}

	dst->total_io_u[DDIR_SYNC] += src->total_io_u[DDIR_SYNC];

	for (k = 0; k < DDIR_RWDIR_CNT; k++) {
		int m;

		for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
			if (!dst->unified_rw_rep)
				dst->io_u_plat[k][m] += src->io_u_plat[k][m];
			else
				dst->io_u_plat[0][m] += src->io_u_plat[k][m];
		}
	}

	dst->total_run_time += src->total_run_time;
	dst->total_submit += src->total_submit;
	dst->total_complete += src->total_complete;
	dst->nr_zone_resets += src->nr_zone_resets;
	dst->cachehit += src->cachehit;
	dst->cachemiss += src->cachemiss;
}

void init_group_run_stat(struct group_run_stats *gs)
{
	int i;

	memset(gs, 0, sizeof(*gs));

	for (i = 0; i < DDIR_RWDIR_CNT; i++)
		gs->min_bw[i] = gs->min_run[i] = ~0UL;
}

void init_thread_stat(struct thread_stat *ts)
{
	int j;

	memset(ts, 0, sizeof(*ts));

	for (j = 0; j < DDIR_RWDIR_CNT; j++) {
		ts->lat_stat[j].min_val = -1UL;
		ts->clat_stat[j].min_val = -1UL;
		ts->slat_stat[j].min_val = -1UL;
		ts->bw_stat[j].min_val = -1UL;
		ts->iops_stat[j].min_val = -1UL;
	}
	ts->sync_stat.min_val = -1UL;
	ts->groupid = -1;
}

void __show_run_stats(void)
{
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;
	struct thread_stat *threadstats, *ts;
	int i, j, k, nr_ts, last_ts, idx;
	bool kb_base_warned = false;
	bool unit_base_warned = false;
	struct json_object *root = NULL;
	struct json_array *array = NULL;
	struct buf_output output[FIO_OUTPUT_NR];
	struct flist_head **opt_lists;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++)
		init_group_run_stat(&runstats[i]);

	/*
	 * find out how many threads stats we need. if group reporting isn't
	 * enabled, it's one-per-td.
	 */
	nr_ts = 0;
	last_ts = -1;
	for_each_td(td, i) {
		if (!td->o.group_reporting) {
			nr_ts++;
			continue;
		}
		if (last_ts == td->groupid)
			continue;
		if (!td->o.stats)
			continue;

		last_ts = td->groupid;
		nr_ts++;
	}

	threadstats = malloc(nr_ts * sizeof(struct thread_stat));
	opt_lists = malloc(nr_ts * sizeof(struct flist_head *));

	for (i = 0; i < nr_ts; i++) {
		init_thread_stat(&threadstats[i]);
		opt_lists[i] = NULL;
	}

	j = 0;
	last_ts = -1;
	idx = 0;
	for_each_td(td, i) {
		if (!td->o.stats)
			continue;
		if (idx && (!td->o.group_reporting ||
		    (td->o.group_reporting && last_ts != td->groupid))) {
			idx = 0;
			j++;
		}

		last_ts = td->groupid;

		ts = &threadstats[j];

		ts->clat_percentiles = td->o.clat_percentiles;
		ts->lat_percentiles = td->o.lat_percentiles;
		ts->percentile_precision = td->o.percentile_precision;
		memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));
		opt_lists[j] = &td->opt_list;

		idx++;
		ts->members++;

		if (ts->groupid == -1) {
			/*
			 * These are per-group shared already
			 */
			snprintf(ts->name, sizeof(ts->name), "%s", td->o.name);
			if (td->o.description)
				snprintf(ts->description,
					 sizeof(ts->description), "%s",
					 td->o.description);
			else
				memset(ts->description, 0, FIO_JOBDESC_SIZE);

			/*
			 * If multiple entries in this group, this is
			 * the first member.
			 */
			ts->thread_number = td->thread_number;
			ts->groupid = td->groupid;

			/*
			 * first pid in group, not very useful...
			 */
			ts->pid = td->pid;

			ts->kb_base = td->o.kb_base;
			ts->unit_base = td->o.unit_base;
			ts->sig_figs = td->o.sig_figs;
			ts->unified_rw_rep = td->o.unified_rw_rep;
		} else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
			log_info("fio: kb_base differs for jobs in group, using"
				 " %u as the base\n", ts->kb_base);
			kb_base_warned = true;
		} else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
			log_info("fio: unit_base differs for jobs in group, using"
				 " %u as the base\n", ts->unit_base);
			unit_base_warned = true;
		}

		ts->continue_on_error = td->o.continue_on_error;
		ts->total_err_count += td->total_err_count;
		ts->first_error = td->first_error;
		if (!ts->error) {
			if (!td->error && td->o.continue_on_error &&
			    td->first_error) {
				ts->error = td->first_error;
				snprintf(ts->verror, sizeof(ts->verror), "%s",
					 td->verror);
			} else if (td->error) {
				ts->error = td->error;
				snprintf(ts->verror, sizeof(ts->verror), "%s",
					 td->verror);
			}
		}

		ts->latency_depth = td->latency_qd;
		ts->latency_target = td->o.latency_target;
		ts->latency_percentile = td->o.latency_percentile;
		ts->latency_window = td->o.latency_window;

		ts->nr_block_infos = td->ts.nr_block_infos;
		for (k = 0; k < ts->nr_block_infos; k++)
			ts->block_infos[k] = td->ts.block_infos[k];

		sum_thread_stats(ts, &td->ts, idx == 1);

		if (td->o.ss_dur) {
			ts->ss_state = td->ss.state;
			ts->ss_dur = td->ss.dur;
			ts->ss_head = td->ss.head;
			ts->ss_bw_data = td->ss.bw_data;
			ts->ss_iops_data = td->ss.iops_data;
			ts->ss_limit.u.f = td->ss.limit;
			ts->ss_slope.u.f = td->ss.slope;
			ts->ss_deviation.u.f = td->ss.deviation;
			ts->ss_criterion.u.f = td->ss.criterion;
		} else
			ts->ss_dur = ts->ss_state = 0;
	}

	for (i = 0; i < nr_ts; i++) {
		unsigned long long bw;

		ts = &threadstats[i];
		if (ts->groupid == -1)
			continue;
		rs = &runstats[ts->groupid];
		rs->kb_base = ts->kb_base;
		rs->unit_base = ts->unit_base;
		rs->sig_figs = ts->sig_figs;
		rs->unified_rw_rep += ts->unified_rw_rep;

		for (j = 0; j < DDIR_RWDIR_CNT; j++) {
			if (!ts->runtime[j])
				continue;
			if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
				rs->min_run[j] = ts->runtime[j];
			if (ts->runtime[j] > rs->max_run[j])
				rs->max_run[j] = ts->runtime[j];

			bw = 0;
			if (ts->runtime[j])
				bw = ts->io_bytes[j] * 1000 / ts->runtime[j];
			if (bw < rs->min_bw[j])
				rs->min_bw[j] = bw;
			if (bw > rs->max_bw[j])
				rs->max_bw[j] = bw;

			rs->iobytes[j] += ts->io_bytes[j];
		}
	}

	for (i = 0; i < groupid + 1; i++) {
		int ddir;

		rs = &runstats[i];

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			if (rs->max_run[ddir])
				rs->agg[ddir] = (rs->iobytes[ddir] * 1000) /
						rs->max_run[ddir];
		}
	}

	for (i = 0; i < FIO_OUTPUT_NR; i++)
		buf_output_init(&output[i]);

	/*
	 * don't overwrite last signal output
1957 */
1958 if (output_format & FIO_OUTPUT_NORMAL)
1959 log_buf(&output[__FIO_OUTPUT_NORMAL], "\n");
1960 if (output_format & FIO_OUTPUT_JSON) {
1961 struct thread_data *global;
1962 char time_buf[32];
1963 struct timeval now;
1964 unsigned long long ms_since_epoch;
1965 time_t tv_sec;
1966
1967 gettimeofday(&now, NULL);
1968 ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
1969 (unsigned long long)(now.tv_usec) / 1000;
1970
1971 tv_sec = now.tv_sec;
1972 os_ctime_r(&tv_sec, time_buf, sizeof(time_buf));
1973 if (time_buf[strlen(time_buf) - 1] == '\n')
1974 time_buf[strlen(time_buf) - 1] = '\0';
1975
1976 root = json_create_object();
1977 json_object_add_value_string(root, "fio version", fio_version_string);
1978 json_object_add_value_int(root, "timestamp", now.tv_sec);
1979 json_object_add_value_int(root, "timestamp_ms", ms_since_epoch);
1980 json_object_add_value_string(root, "time", time_buf);
1981 global = get_global_options();
1982 json_add_job_opts(root, "global options", &global->opt_list);
1983 array = json_create_array();
1984 json_object_add_value_array(root, "jobs", array);
1985 }
1986
1987 if (is_backend)
1988 fio_server_send_job_options(&get_global_options()->opt_list, -1U);
1989
1990 for (i = 0; i < nr_ts; i++) {
1991 ts = &threadstats[i];
1992 rs = &runstats[ts->groupid];
1993
1994 if (is_backend) {
1995 fio_server_send_job_options(opt_lists[i], i);
1996 fio_server_send_ts(ts, rs);
1997 } else {
1998 if (output_format & FIO_OUTPUT_TERSE)
1999 show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]);
2000 if (output_format & FIO_OUTPUT_JSON) {
2001 struct json_object *tmp = show_thread_status_json(ts, rs, opt_lists[i]);
2002 json_array_add_value_object(array, tmp);
2003 }
2004 if (output_format & FIO_OUTPUT_NORMAL)
2005 show_thread_status_normal(ts, rs, &output[__FIO_OUTPUT_NORMAL]);
2006 }
2007 }
2008 if (!is_backend && (output_format & FIO_OUTPUT_JSON)) {
2009 /* disk util stats, if any */
2010 show_disk_util(1, root, &output[__FIO_OUTPUT_JSON]);
2011
2012 show_idle_prof_stats(FIO_OUTPUT_JSON, root, &output[__FIO_OUTPUT_JSON]);
2013
2014 json_print_object(root, &output[__FIO_OUTPUT_JSON]);
2015 log_buf(&output[__FIO_OUTPUT_JSON], "\n");
2016 json_free_object(root);
2017 }
2018
2019 for (i = 0; i < groupid + 1; i++) {
2020 rs = &runstats[i];
2021
2022 rs->groupid = i;
2023 if (is_backend)
2024 fio_server_send_gs(rs);
2025 else if (output_format & FIO_OUTPUT_NORMAL)
2026 show_group_stats(rs, &output[__FIO_OUTPUT_NORMAL]);
2027 }
2028
2029 if (is_backend)
2030 fio_server_send_du();
2031 else if (output_format & FIO_OUTPUT_NORMAL) {
2032 show_disk_util(0, NULL, &output[__FIO_OUTPUT_NORMAL]);
2033 show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, &output[__FIO_OUTPUT_NORMAL]);
2034 }
2035
2036 for (i = 0; i < FIO_OUTPUT_NR; i++) {
2037 struct buf_output *out = &output[i];
2038
2039 log_info_buf(out->buf, out->buflen);
2040 buf_output_free(out);
2041 }
2042
2043 fio_idle_prof_cleanup();
2044
2045 log_info_flush();
2046 free(runstats);
2047 free(threadstats);
2048 free(opt_lists);
2049}
2050
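/*
 * Interim stats for still-running jobs: temporarily fold the elapsed
 * time of the current interval into the per-ddir runtimes, print via
 * __show_run_stats(), then subtract it again so the final accounting
 * is unaffected.
 */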
2051void __show_running_run_stats(void)
2052{
2053 struct thread_data *td;
2054 unsigned long long *rt;
2055 struct timespec ts;
2056 int i;
2057
2058 fio_sem_down(stat_sem);
2059
2060 rt = malloc(thread_number * sizeof(unsigned long long));
2061 fio_gettime(&ts, NULL);
2062
2063 for_each_td(td, i) {
2064 td->update_rusage = 1;
2065 td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
2066 td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
2067 td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
2068 td->ts.total_run_time = mtime_since(&td->epoch, &ts);
2069
2070 rt[i] = mtime_since(&td->start, &ts);
2071 if (td_read(td) && td->ts.io_bytes[DDIR_READ])
2072 td->ts.runtime[DDIR_READ] += rt[i];
2073 if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
2074 td->ts.runtime[DDIR_WRITE] += rt[i];
2075 if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
2076 td->ts.runtime[DDIR_TRIM] += rt[i];
2077 }
2078
2079 for_each_td(td, i) {
2080 if (td->runstate >= TD_EXITED)
2081 continue;
2082 if (td->rusage_sem) {
2083 td->update_rusage = 1;
2084 fio_sem_down(td->rusage_sem);
2085 }
2086 td->update_rusage = 0;
2087 }
2088
2089 __show_run_stats();
2090
2091 for_each_td(td, i) {
2092 if (td_read(td) && td->ts.io_bytes[DDIR_READ])
2093 td->ts.runtime[DDIR_READ] -= rt[i];
2094 if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
2095 td->ts.runtime[DDIR_WRITE] -= rt[i];
2096 if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
2097 td->ts.runtime[DDIR_TRIM] -= rt[i];
2098 }
2099
2100 free(rt);
2101 fio_sem_up(stat_sem);
2102}
2103
2104static bool status_interval_init;
2105static struct timespec status_time;
2106static bool status_file_disabled;
2107
2108#define FIO_STATUS_FILE "fio-dump-status"
2109
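/*
 * Creating the trigger file (e.g. "touch /tmp/fio-dump-status") makes
 * the next poll dump interim run stats; the file is consumed (unlinked)
 * once seen. TMPDIR/TEMP override the /tmp default.
 */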
2110static int check_status_file(void)
2111{
2112 struct stat sb;
2113 const char *temp_dir;
2114 char fio_status_file_path[PATH_MAX];
2115
2116 if (status_file_disabled)
2117 return 0;
2118
2119 temp_dir = getenv("TMPDIR");
2120 if (temp_dir == NULL) {
2121 temp_dir = getenv("TEMP");
2122 if (temp_dir && strlen(temp_dir) >= PATH_MAX)
2123 temp_dir = NULL;
2124 }
2125 if (temp_dir == NULL)
2126 temp_dir = "/tmp";
2127#ifdef __COVERITY__
2128 __coverity_tainted_data_sanitize__(temp_dir);
2129#endif
2130
2131 snprintf(fio_status_file_path, sizeof(fio_status_file_path), "%s/%s", temp_dir, FIO_STATUS_FILE);
2132
2133 if (stat(fio_status_file_path, &sb))
2134 return 0;
2135
2136 if (unlink(fio_status_file_path) < 0) {
2137 log_err("fio: failed to unlink %s: %s\n", fio_status_file_path,
2138 strerror(errno));
2139 log_err("fio: disabling status file updates\n");
2140 status_file_disabled = true;
2141 }
2142
2143 return 1;
2144}
2145
2146void check_for_running_stats(void)
2147{
2148 if (status_interval) {
2149 if (!status_interval_init) {
2150 fio_gettime(&status_time, NULL);
2151 status_interval_init = true;
2152 } else if (mtime_since_now(&status_time) >= status_interval) {
2153 show_running_run_stats();
2154 fio_gettime(&status_time, NULL);
2155 return;
2156 }
2157 }
2158 if (check_status_file()) {
2159 show_running_run_stats();
2160 return;
2161 }
2162}
2163
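/*
 * Welford's online update: maintain a running mean and the sum of
 * squared deviations (S), so reporting code can derive the sample
 * stddev as sqrt(S / (samples - 1)) without storing every value.
 */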
2164static inline void add_stat_sample(struct io_stat *is, unsigned long long data)
2165{
2166 double val = data;
2167 double delta;
2168
2169 if (data > is->max_val)
2170 is->max_val = data;
2171 if (data < is->min_val)
2172 is->min_val = data;
2173
2174 delta = val - is->mean.u.f;
2175 if (delta) {
2176 is->mean.u.f += delta / (is->samples + 1.0);
2177 is->S.u.f += delta * (val - is->mean.u.f);
2178 }
2179
2180 is->samples++;
2181}
2182
2183/*
2184 * Allocate a new struct io_logs, added to the tail of the log
2185 * list for 'iolog'. Returns NULL on allocation failure.
2186 */
2187static struct io_logs *get_new_log(struct io_log *iolog)
2188{
2189 size_t new_size, new_samples;
2190 struct io_logs *cur_log;
2191
2192 /*
2193 * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling
2194 * forever
2195 */
2196 if (!iolog->cur_log_max)
2197 new_samples = DEF_LOG_ENTRIES;
2198 else {
2199 new_samples = iolog->cur_log_max * 2;
2200 if (new_samples > MAX_LOG_ENTRIES)
2201 new_samples = MAX_LOG_ENTRIES;
2202 }
2203
2204 new_size = new_samples * log_entry_sz(iolog);
2205
2206 cur_log = smalloc(sizeof(*cur_log));
2207 if (cur_log) {
2208 INIT_FLIST_HEAD(&cur_log->list);
2209 cur_log->log = malloc(new_size);
2210 if (cur_log->log) {
2211 cur_log->nr_samples = 0;
2212 cur_log->max_samples = new_samples;
2213 flist_add_tail(&cur_log->list, &iolog->io_logs);
2214 iolog->cur_log_max = new_samples;
2215 return cur_log;
2216 }
2217 sfree(cur_log);
2218 }
2219
2220 return NULL;
2221}
2222
2223/*
2224 * Add and return a new log chunk, or return the current log if it still has room
2225 */
2226static struct io_logs *regrow_log(struct io_log *iolog)
2227{
2228 struct io_logs *cur_log;
2229 int i;
2230
2231 if (!iolog || iolog->disabled)
2232 goto disable;
2233
2234 cur_log = iolog_cur_log(iolog);
2235 if (!cur_log) {
2236 cur_log = get_new_log(iolog);
2237 if (!cur_log)
2238 return NULL;
2239 }
2240
2241 if (cur_log->nr_samples < cur_log->max_samples)
2242 return cur_log;
2243
2244 /*
2245 * No room for a new sample. If we're compressing on the fly, flush
2246 * out the current chunk
2247 */
2248 if (iolog->log_gz) {
2249 if (iolog_cur_flush(iolog, cur_log)) {
2250 log_err("fio: failed flushing iolog! Will stop logging.\n");
2251 return NULL;
2252 }
2253 }
2254
2255 /*
2256 * Get a new log array, and add to our list
2257 */
2258 cur_log = get_new_log(iolog);
2259 if (!cur_log) {
2260 log_err("fio: failed extending iolog! Will stop logging.\n");
2261 return NULL;
2262 }
2263
2264 if (!iolog->pending || !iolog->pending->nr_samples)
2265 return cur_log;
2266
2267 /*
2268 * Flush pending items to new log
2269 */
2270 for (i = 0; i < iolog->pending->nr_samples; i++) {
2271 struct io_sample *src, *dst;
2272
2273 src = get_sample(iolog, iolog->pending, i);
2274 dst = get_sample(iolog, cur_log, i);
2275 memcpy(dst, src, log_entry_sz(iolog));
2276 }
2277 cur_log->nr_samples = iolog->pending->nr_samples;
2278
2279 iolog->pending->nr_samples = 0;
2280 return cur_log;
2281disable:
2282 if (iolog)
2283 iolog->disabled = true;
2284 return NULL;
2285}
2286
2287void regrow_logs(struct thread_data *td)
2288{
2289 regrow_log(td->slat_log);
2290 regrow_log(td->clat_log);
2291 regrow_log(td->clat_hist_log);
2292 regrow_log(td->lat_log);
2293 regrow_log(td->bw_log);
2294 regrow_log(td->iops_log);
2295 td->flags &= ~TD_F_REGROW_LOGS;
2296}
2297
2298static struct io_logs *get_cur_log(struct io_log *iolog)
2299{
2300 struct io_logs *cur_log;
2301
2302 cur_log = iolog_cur_log(iolog);
2303 if (!cur_log) {
2304 cur_log = get_new_log(iolog);
2305 if (!cur_log)
2306 return NULL;
2307 }
2308
2309 if (cur_log->nr_samples < cur_log->max_samples)
2310 return cur_log;
2311
2312 /*
2313 * Out of space. If we're in IO offload mode, or we're not doing
2314 * per unit logging (hence logging happens outside of the IO thread
2315 * as well), add a new log chunk inline. If we're doing inline
2316 * submissions, flag 'td' as needing a log regrow and we'll take
2317 * care of it on the submission side.
2318 */
2319 if ((iolog->td && iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD) ||
2320 !per_unit_log(iolog))
2321 return regrow_log(iolog);
2322
2323 if (iolog->td)
2324 iolog->td->flags |= TD_F_REGROW_LOGS;
2325 if (iolog->pending)
2326 assert(iolog->pending->nr_samples < iolog->pending->max_samples);
2327 return iolog->pending;
2328}
2329
2330static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
2331 enum fio_ddir ddir, unsigned long long bs,
2332 unsigned long t, uint64_t offset)
2333{
2334 struct io_logs *cur_log;
2335
2336 if (iolog->disabled)
2337 return;
2338 if (flist_empty(&iolog->io_logs))
2339 iolog->avg_last[ddir] = t;
2340
2341 cur_log = get_cur_log(iolog);
2342 if (cur_log) {
2343 struct io_sample *s;
2344
2345 s = get_sample(iolog, cur_log, cur_log->nr_samples);
2346
2347 s->data = data;
2348 s->time = t + (iolog->td ? iolog->td->unix_epoch : 0);
2349 io_sample_set_ddir(iolog, s, ddir);
2350 s->bs = bs;
2351
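		/*
		 * With offset logging enabled, each record is the larger
		 * struct io_sample_offset, which embeds io_sample as its
		 * first member; log_entry_sz() sizes entries accordingly.
		 */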
2352 if (iolog->log_offset) {
2353 struct io_sample_offset *so = (void *) s;
2354
2355 so->offset = offset;
2356 }
2357
2358 cur_log->nr_samples++;
2359 return;
2360 }
2361
2362 iolog->disabled = true;
2363}
2364
2365static inline void reset_io_stat(struct io_stat *ios)
2366{
2367 ios->min_val = -1ULL;
2368 ios->max_val = ios->samples = 0;
2369 ios->mean.u.f = ios->S.u.f = 0;
2370}
2371
2372void reset_io_stats(struct thread_data *td)
2373{
2374 struct thread_stat *ts = &td->ts;
2375 int i, j;
2376
2377 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
2378 reset_io_stat(&ts->clat_stat[i]);
2379 reset_io_stat(&ts->slat_stat[i]);
2380 reset_io_stat(&ts->lat_stat[i]);
2381 reset_io_stat(&ts->bw_stat[i]);
2382 reset_io_stat(&ts->iops_stat[i]);
2383
2384 ts->io_bytes[i] = 0;
2385 ts->runtime[i] = 0;
2386 ts->total_io_u[i] = 0;
2387 ts->short_io_u[i] = 0;
2388 ts->drop_io_u[i] = 0;
2389
2390 for (j = 0; j < FIO_IO_U_PLAT_NR; j++) {
2391 ts->io_u_plat[i][j] = 0;
2392 if (!i)
2393 ts->io_u_sync_plat[j] = 0;
2394 }
2395 }
2396
2397 ts->total_io_u[DDIR_SYNC] = 0;
2398
2399 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
2400 ts->io_u_map[i] = 0;
2401 ts->io_u_submit[i] = 0;
2402 ts->io_u_complete[i] = 0;
2403 }
2404
2405 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
2406 ts->io_u_lat_n[i] = 0;
2407 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
2408 ts->io_u_lat_u[i] = 0;
2409 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
2410 ts->io_u_lat_m[i] = 0;
2411
2412 ts->total_submit = 0;
2413 ts->total_complete = 0;
2414 ts->nr_zone_resets = 0;
2415 ts->cachehit = ts->cachemiss = 0;
2416}
2417
2418static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
2419 unsigned long elapsed, bool log_max)
2420{
2421 /*
2422 * Record an entry in the log. Use the mean of the samples in the
2423 * window, rounded to the nearest integer, and only write an entry
2424 * if the window actually collected samples.
2425 */
2426 if (iolog->avg_window[ddir].samples) {
2427 union io_sample_data data;
2428
2429 if (log_max)
2430 data.val = iolog->avg_window[ddir].max_val;
2431 else
2432 data.val = iolog->avg_window[ddir].mean.u.f + 0.50;
2433
2434 __add_log_sample(iolog, data, ddir, 0, elapsed, 0);
2435 }
2436
2437 reset_io_stat(&iolog->avg_window[ddir]);
2438}
2439
2440static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
2441 bool log_max)
2442{
2443 int ddir;
2444
2445 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
2446 __add_stat_to_log(iolog, ddir, elapsed, log_max);
2447}
2448
2449static unsigned long add_log_sample(struct thread_data *td,
2450 struct io_log *iolog,
2451 union io_sample_data data,
2452 enum fio_ddir ddir, unsigned long long bs,
2453 uint64_t offset)
2454{
2455 unsigned long elapsed, this_window;
2456
2457 if (!ddir_rw(ddir))
2458 return 0;
2459
2460 elapsed = mtime_since_now(&td->epoch);
2461
2462 /*
2463 * If no time averaging, just add the log sample.
2464 */
2465 if (!iolog->avg_msec) {
2466 __add_log_sample(iolog, data, ddir, bs, elapsed, offset);
2467 return 0;
2468 }
2469
2470 /*
2471 * Add the sample. If the time period has passed, then
2472 * add that entry to the log and clear.
2473 */
2474 add_stat_sample(&iolog->avg_window[ddir], data.val);
2475
2476 /*
2477 * If period hasn't passed, adding the above sample is all we
2478 * need to do.
2479 */
2480 this_window = elapsed - iolog->avg_last[ddir];
2481 if (elapsed < iolog->avg_last[ddir])
2482 return iolog->avg_last[ddir] - elapsed;
2483 else if (this_window < iolog->avg_msec) {
2484 unsigned long diff = iolog->avg_msec - this_window;
2485
2486 if (inline_log(iolog) || diff > LOG_MSEC_SLACK)
2487 return diff;
2488 }
2489
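	/*
	 * Worked example, assuming avg_msec=100: with avg_last=500 and
	 * elapsed=630, this_window is 130, so the window mean is flushed
	 * and avg_last advances to 630 - (130 - 100) = 600, preserving
	 * the 100ms grid despite the 30ms overshoot.
	 */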
2490 __add_stat_to_log(iolog, ddir, elapsed, td->o.log_max != 0);
2491
2492 iolog->avg_last[ddir] = elapsed - (this_window - iolog->avg_msec);
2493 return iolog->avg_msec;
2494}
2495
2496void finalize_logs(struct thread_data *td, bool unit_logs)
2497{
2498 unsigned long elapsed;
2499
2500 elapsed = mtime_since_now(&td->epoch);
2501
2502 if (td->clat_log && unit_logs)
2503 _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0);
2504 if (td->slat_log && unit_logs)
2505 _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0);
2506 if (td->lat_log && unit_logs)
2507 _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0);
2508 if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
2509 _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0);
2510 if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
2511 _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
2512}
2513
2514void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned long long bs)
2515{
2516 struct io_log *iolog;
2517
2518 if (!ddir_rw(ddir))
2519 return;
2520
2521 iolog = agg_io_log[ddir];
2522 __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0);
2523}
2524
2525void add_sync_clat_sample(struct thread_stat *ts, unsigned long long nsec)
2526{
2527 unsigned int idx = plat_val_to_idx(nsec);
2528 assert(idx < FIO_IO_U_PLAT_NR);
2529
2530 ts->io_u_sync_plat[idx]++;
2531 add_stat_sample(&ts->sync_stat, nsec);
2532}
2533
2534static void add_clat_percentile_sample(struct thread_stat *ts,
2535 unsigned long long nsec, enum fio_ddir ddir)
2536{
2537 unsigned int idx = plat_val_to_idx(nsec);
2538 assert(idx < FIO_IO_U_PLAT_NR);
2539
2540 ts->io_u_plat[ddir][idx]++;
2541}
2542
2543void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
2544 unsigned long long nsec, unsigned long long bs,
2545 uint64_t offset)
2546{
2547 const bool needs_lock = td_async_processing(td);
2548 unsigned long elapsed, this_window;
2549 struct thread_stat *ts = &td->ts;
2550 struct io_log *iolog = td->clat_hist_log;
2551
2552 if (needs_lock)
2553 __td_io_u_lock(td);
2554
2555 add_stat_sample(&ts->clat_stat[ddir], nsec);
2556
2557 if (td->clat_log)
2558 add_log_sample(td, td->clat_log, sample_val(nsec), ddir, bs,
2559 offset);
2560
2561 if (ts->clat_percentiles)
2562 add_clat_percentile_sample(ts, nsec, ddir);
2563
2564 if (iolog && iolog->hist_msec) {
2565 struct io_hist *hw = &iolog->hist_window[ddir];
2566
2567 hw->samples++;
2568 elapsed = mtime_since_now(&td->epoch);
2569 if (!hw->hist_last)
2570 hw->hist_last = elapsed;
2571 this_window = elapsed - hw->hist_last;
2572
2573 if (this_window >= iolog->hist_msec) {
2574 uint64_t *io_u_plat;
2575 struct io_u_plat_entry *dst;
2576
2577 /*
2578 * Make a byte-for-byte copy of the latency histogram
2579 * stored in td->ts.io_u_plat[ddir], recording it in a
2580 * log sample. Note that the matching call to free() is
2581 * located in iolog.c after printing this sample to the
2582 * log file.
2583 */
2584 io_u_plat = (uint64_t *) td->ts.io_u_plat[ddir];
2585 dst = malloc(sizeof(struct io_u_plat_entry));
2586 memcpy(&(dst->io_u_plat), io_u_plat,
2587 FIO_IO_U_PLAT_NR * sizeof(uint64_t));
2588 flist_add(&dst->list, &hw->list);
2589 __add_log_sample(iolog, sample_plat(dst), ddir, bs,
2590 elapsed, offset);
2591
2592 /*
2593 * Advance the last-recorded time to now, minus however
2594 * far we overshot the window before actually
2595 * making the record.
2596 */
2597 hw->hist_last = elapsed - (this_window - iolog->hist_msec);
2598 hw->samples = 0;
2599 }
2600 }
2601
2602 if (needs_lock)
2603 __td_io_u_unlock(td);
2604}
2605
2606void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
2607 unsigned long usec, unsigned long long bs, uint64_t offset)
2608{
2609 const bool needs_lock = td_async_processing(td);
2610 struct thread_stat *ts = &td->ts;
2611
2612 if (!ddir_rw(ddir))
2613 return;
2614
2615 if (needs_lock)
2616 __td_io_u_lock(td);
2617
2618 add_stat_sample(&ts->slat_stat[ddir], usec);
2619
2620 if (td->slat_log)
2621 add_log_sample(td, td->slat_log, sample_val(usec), ddir, bs, offset);
2622
2623 if (needs_lock)
2624 __td_io_u_unlock(td);
2625}
2626
2627void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
2628 unsigned long long nsec, unsigned long long bs,
2629 uint64_t offset)
2630{
2631 const bool needs_lock = td_async_processing(td);
2632 struct thread_stat *ts = &td->ts;
2633
2634 if (!ddir_rw(ddir))
2635 return;
2636
2637 if (needs_lock)
2638 __td_io_u_lock(td);
2639
2640 add_stat_sample(&ts->lat_stat[ddir], nsec);
2641
2642 if (td->lat_log)
2643 add_log_sample(td, td->lat_log, sample_val(nsec), ddir, bs,
2644 offset);
2645
2646 if (ts->lat_percentiles)
2647 add_clat_percentile_sample(ts, nsec, ddir);
2648
2649 if (needs_lock)
2650 __td_io_u_unlock(td);
2651}
2652
2653void add_bw_sample(struct thread_data *td, struct io_u *io_u,
2654 unsigned int bytes, unsigned long long spent)
2655{
2656 const bool needs_lock = td_async_processing(td);
2657 struct thread_stat *ts = &td->ts;
2658 unsigned long rate;
2659
2660 if (spent)
2661 rate = (unsigned long) (bytes * 1000000ULL / spent);
2662 else
2663 rate = 0;
2664
2665 if (needs_lock)
2666 __td_io_u_lock(td);
2667
2668 add_stat_sample(&ts->bw_stat[io_u->ddir], rate);
2669
2670 if (td->bw_log)
2671 add_log_sample(td, td->bw_log, sample_val(rate), io_u->ddir,
2672 bytes, io_u->offset);
2673
2674 td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir];
2675
2676 if (needs_lock)
2677 __td_io_u_unlock(td);
2678}
2679
2680static int __add_samples(struct thread_data *td, struct timespec *parent_tv,
2681 struct timespec *t, unsigned int avg_time,
2682 uint64_t *this_io_bytes, uint64_t *stat_io_bytes,
2683 struct io_stat *stat, struct io_log *log,
2684 bool is_kb)
2685{
2686 const bool needs_lock = td_async_processing(td);
2687 unsigned long spent, rate;
2688 enum fio_ddir ddir;
2689 unsigned long next, next_log;
2690
2691 next_log = avg_time;
2692
2693 spent = mtime_since(parent_tv, t);
2694 if (spent < avg_time && avg_time - spent >= LOG_MSEC_SLACK)
2695 return avg_time - spent;
2696
2697 if (needs_lock)
2698 __td_io_u_lock(td);
2699
2700 /*
2701 * Compute the rate for each data direction over the interval.
2702 */
2703 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
2704 uint64_t delta;
2705
2706 delta = this_io_bytes[ddir] - stat_io_bytes[ddir];
2707 if (!delta)
2708 continue; /* No entries for interval */
2709
2710 if (spent) {
2711 if (is_kb)
2712 rate = delta * 1000 / spent / 1024; /* KiB/s */
2713 else
2714 rate = (delta * 1000) / spent;
2715 } else
2716 rate = 0;
2717
2718 add_stat_sample(&stat[ddir], rate);
2719
2720 if (log) {
2721 unsigned long long bs = 0;
2722
2723 if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
2724 bs = td->o.min_bs[ddir];
2725
2726 next = add_log_sample(td, log, sample_val(rate), ddir, bs, 0);
2727 next_log = min(next_log, next);
2728 }
2729
2730 stat_io_bytes[ddir] = this_io_bytes[ddir];
2731 }
2732
2733 timespec_add_msec(parent_tv, avg_time);
2734
2735 if (needs_lock)
2736 __td_io_u_unlock(td);
2737
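	/*
	 * If this interval overran avg_time, return a shortened wait so
	 * the sampling grid catches back up; otherwise wait a full period.
	 */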
2738 if (spent <= avg_time)
2739 next = avg_time;
2740 else
2741 next = avg_time - (1 + spent - avg_time);
2742
2743 return min(next, next_log);
2744}
2745
2746static int add_bw_samples(struct thread_data *td, struct timespec *t)
2747{
2748 return __add_samples(td, &td->bw_sample_time, t, td->o.bw_avg_time,
2749 td->this_io_bytes, td->stat_io_bytes,
2750 td->ts.bw_stat, td->bw_log, true);
2751}
2752
2753void add_iops_sample(struct thread_data *td, struct io_u *io_u,
2754 unsigned int bytes)
2755{
2756 const bool needs_lock = td_async_processing(td);
2757 struct thread_stat *ts = &td->ts;
2758
2759 if (needs_lock)
2760 __td_io_u_lock(td);
2761
2762 add_stat_sample(&ts->iops_stat[io_u->ddir], 1);
2763
2764 if (td->iops_log)
2765 add_log_sample(td, td->iops_log, sample_val(1), io_u->ddir,
2766 bytes, io_u->offset);
2767
2768 td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];
2769
2770 if (needs_lock)
2771 __td_io_u_unlock(td);
2772}
2773
2774static int add_iops_samples(struct thread_data *td, struct timespec *t)
2775{
2776 return __add_samples(td, &td->iops_sample_time, t, td->o.iops_avg_time,
2777 td->this_io_blocks, td->stat_io_blocks,
2778 td->ts.iops_stat, td->iops_log, false);
2779}
2780
2781/*
2782 * Returns msecs to next event
2783 */
2784int calc_log_samples(void)
2785{
2786 struct thread_data *td;
2787 unsigned int next = ~0U, tmp;
2788 struct timespec now;
2789 int i;
2790
2791 fio_gettime(&now, NULL);
2792
2793 for_each_td(td, i) {
2794 if (!td->o.stats)
2795 continue;
2796 if (in_ramp_time(td) ||
2797 !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
2798 next = min(td->o.iops_avg_time, td->o.bw_avg_time);
2799 continue;
2800 }
2801 if (!td->bw_log ||
2802 !per_unit_log(td->bw_log)) {
2803 tmp = add_bw_samples(td, &now);
2804 if (tmp < next)
2805 next = tmp;
2806 }
2807 if (!td->iops_log ||
2808 !per_unit_log(td->iops_log)) {
2809 tmp = add_iops_samples(td, &now);
2810 if (tmp < next)
2811 next = tmp;
2812 }
2813 }
2814
2815 return next == ~0U ? 0 : next;
2816}
2817
2818void stat_init(void)
2819{
2820 stat_sem = fio_sem_init(FIO_SEM_UNLOCKED);
2821}
2822
2823void stat_exit(void)
2824{
2825 /*
2826 * Once we hold the semaphore, we know any out-of-band access
2827 * to it has ended.
2828 */
2829 fio_sem_down(stat_sem);
2830 fio_sem_remove(stat_sem);
2831}
2832
2833/*
2834 * Called from signal handler. Wake up status thread.
2835 */
2836void show_running_run_stats(void)
2837{
2838 helper_do_stat();
2839}
2840
2841uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u)
2842{
2843 /* Ignore io_u's which span multiple blocks--they will just get
2844 * inaccurate counts. */
2845 int idx = (io_u->offset - io_u->file->file_offset)
2846 / td->o.bs[DDIR_TRIM];
2847 assert(idx < td->ts.nr_block_infos);
2848 uint32_t *info = &td->ts.block_infos[idx];
2849 return info;
2850}