9 #include "lib/ieee754.h"
11 #include "lib/getrusage.h"
14 #include "lib/output_buffer.h"
15 #include "helper_thread.h"
18 #include "oslib/asprintf.h"
20 #define LOG_MSEC_SLACK 1
22 struct fio_sem *stat_sem;
24 void clear_rusage_stat(struct thread_data *td)
26 struct thread_stat *ts = &td->ts;
28 fio_getrusage(&td->ru_start);
29 ts->usr_time = ts->sys_time = 0;
31 ts->minf = ts->majf = 0;
34 void update_rusage_stat(struct thread_data *td)
36 struct thread_stat *ts = &td->ts;
38 fio_getrusage(&td->ru_end);
39 ts->usr_time += mtime_since_tv(&td->ru_start.ru_utime,
40 &td->ru_end.ru_utime);
41 ts->sys_time += mtime_since_tv(&td->ru_start.ru_stime,
42 &td->ru_end.ru_stime);
43 ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
44 - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
45 ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
46 ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;
48 memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
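/*
 * Note: copying ru_end back into ru_start means each call only adds the
 * delta since the previous call, so update_rusage_stat() can be invoked
 * repeatedly over the life of a job and the totals in ts keep accumulating.
 */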
52 * Given a latency, return the index of the corresponding bucket in
53 * the structure tracking percentiles.
55 * (1) find the group (and error bits) that the value (latency)
56 * belongs to by looking at its MSB. (2) find the bucket number in the
57 * group by looking at the index bits.
60 static unsigned int plat_val_to_idx(unsigned long long val)
62 unsigned int msb, error_bits, base, offset, idx;
64 /* Find MSB starting from bit 0 */
68 msb = (sizeof(val)*8) - __builtin_clzll(val) - 1;
71 * MSB <= FIO_IO_U_PLAT_BITS: the value maps into the first two groups and
72 * cannot be rounded off, so use all bits of the sample as the index
74 if (msb <= FIO_IO_U_PLAT_BITS)
77 /* Compute the number of error bits to discard */
78 error_bits = msb - FIO_IO_U_PLAT_BITS;
80 /* Compute the number of buckets before the group */
81 base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;
84 * Discard the error bits and apply the mask to find the
85 * index for the buckets in the group
87 offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);
89 /* Make sure the index does not exceed (array size - 1) */
90 idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
91 (base + offset) : (FIO_IO_U_PLAT_NR - 1);
97 * Convert the given index of the bucket array to the value
98 * represented by the bucket
100 static unsigned long long plat_idx_to_val(unsigned int idx)
102 unsigned int error_bits;
103 unsigned long long k, base;
105 assert(idx < FIO_IO_U_PLAT_NR);
107 /* The first two groups (idx < 2 * FIO_IO_U_PLAT_VAL) store exact values,
108 * so the index is the value itself */
109 if (idx < (FIO_IO_U_PLAT_VAL << 1))
112 /* Find the group and compute the minimum value of that group */
113 error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
114 base = ((unsigned long long) 1) << (error_bits + FIO_IO_U_PLAT_BITS);
116 /* Find the bucket number within the group */
117 k = idx % FIO_IO_U_PLAT_VAL;
119 /* Return the mean of the range of the bucket */
120 return base + ((k + 0.5) * (1 << error_bits));
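/*
 * Worked example (assuming FIO_IO_U_PLAT_BITS == 6 and FIO_IO_U_PLAT_VAL == 64,
 * the values defined in stat.h): a 1000ns sample has msb = 9, so
 * error_bits = 3, base = (3 + 1) * 64 = 256, offset = (1000 >> 3) & 63 = 61,
 * giving idx = 317. Mapping back, plat_idx_to_val(317) = 512 + 61.5 * 8 = 1004ns,
 * roughly the middle of the 8ns-wide bucket that 1000ns falls into.
 */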
123 static int double_cmp(const void *a, const void *b)
125 const fio_fp64_t fa = *(const fio_fp64_t *) a;
126 const fio_fp64_t fb = *(const fio_fp64_t *) b;
131 else if (fa.u.f < fb.u.f)
137 unsigned int calc_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr,
138 fio_fp64_t *plist, unsigned long long **output,
139 unsigned long long *maxv, unsigned long long *minv)
141 unsigned long long sum = 0;
142 unsigned int len, i, j = 0;
143 unsigned long long *ovals = NULL;
150 while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
157 * Sort the percentile list. Note that it may already be sorted if
158 * we are using the default values, but since it's a short list this
159 * isn't a worry. Also note that this does not work for NaN values.
162 qsort(plist, len, sizeof(plist[0]), double_cmp);
164 ovals = malloc(len * sizeof(*ovals));
169 * Calculate bucket values, note down max and min values
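 *
 * The loop below walks the histogram buckets in order, keeping a running
 * count (sum); each time sum reaches plist[j]% of nr, the current bucket's
 * value (via plat_idx_to_val()) is recorded as that percentile.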
172 for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
174 while (sum >= ((long double) plist[j].u.f / 100.0 * nr)) {
175 assert(plist[j].u.f <= 100.0);
177 ovals[j] = plat_idx_to_val(i);
178 if (ovals[j] < *minv)
180 if (ovals[j] > *maxv)
183 is_last = (j == len - 1) != 0;
192 log_err("fio: error calculating latency percentiles\n");
199 * Find and display the p-th percentile of clat
201 static void show_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr,
202 fio_fp64_t *plist, unsigned int precision,
203 const char *pre, struct buf_output *out)
205 unsigned int divisor, len, i, j = 0;
206 unsigned long long minv, maxv;
207 unsigned long long *ovals;
208 int per_line, scale_down, time_width;
212 len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
217 * We default to nsecs, but if the value range is such that we
218 * should scale down to usecs or msecs, do that.
220 if (minv > 2000000 && maxv > 99999999ULL) {
223 log_buf(out, " %s percentiles (msec):\n |", pre);
224 } else if (minv > 2000 && maxv > 99999) {
227 log_buf(out, " %s percentiles (usec):\n |", pre);
231 log_buf(out, " %s percentiles (nsec):\n |", pre);
235 time_width = max(5, (int) (log10(maxv / divisor) + 1));
236 snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
237 precision, time_width);
238 /* fmt will be something like " %5.2fth=[%4llu]%c" */
239 per_line = (80 - 7) / (precision + 10 + time_width);
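/* e.g. precision=2, time_width=5 -> (80 - 7) / 17 = 4 entries per line */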
241 for (j = 0; j < len; j++) {
243 if (j != 0 && (j % per_line) == 0)
246 /* end of the list */
247 is_last = (j == len - 1) != 0;
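/* each scale_down step divides by 1000 (nsec -> usec -> msec), rounding up */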
249 for (i = 0; i < scale_down; i++)
250 ovals[j] = (ovals[j] + 999) / 1000;
252 log_buf(out, fmt, plist[j].u.f, ovals[j], is_last ? '\n' : ',');
257 if ((j % per_line) == per_line - 1) /* for formatting */
265 bool calc_lat(struct io_stat *is, unsigned long long *min,
266 unsigned long long *max, double *mean, double *dev)
268 double n = (double) is->samples;
275 *mean = is->mean.u.f;
278 *dev = sqrt(is->S.u.f / (n - 1.0));
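/*
 * S is the running sum of squared deviations from the mean (maintained
 * online as samples are added), so S / (n - 1) is the unbiased sample
 * variance and dev its square root.
 */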
285 void show_group_stats(struct group_run_stats *rs, struct buf_output *out)
287 char *io, *agg, *min, *max;
288 char *ioalt, *aggalt, *minalt, *maxalt;
289 const char *str[] = { " READ", " WRITE" , " TRIM"};
292 log_buf(out, "\nRun status group %d (all jobs):\n", rs->groupid);
294 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
295 const int i2p = is_power_of_2(rs->kb_base);
300 io = num2str(rs->iobytes[i], rs->sig_figs, 1, i2p, N2S_BYTE);
301 ioalt = num2str(rs->iobytes[i], rs->sig_figs, 1, !i2p, N2S_BYTE);
302 agg = num2str(rs->agg[i], rs->sig_figs, 1, i2p, rs->unit_base);
303 aggalt = num2str(rs->agg[i], rs->sig_figs, 1, !i2p, rs->unit_base);
304 min = num2str(rs->min_bw[i], rs->sig_figs, 1, i2p, rs->unit_base);
305 minalt = num2str(rs->min_bw[i], rs->sig_figs, 1, !i2p, rs->unit_base);
306 max = num2str(rs->max_bw[i], rs->sig_figs, 1, i2p, rs->unit_base);
307 maxalt = num2str(rs->max_bw[i], rs->sig_figs, 1, !i2p, rs->unit_base);
308 log_buf(out, "%s: bw=%s (%s), %s-%s (%s-%s), io=%s (%s), run=%llu-%llumsec\n",
309 rs->unified_rw_rep ? " MIXED" : str[i],
310 agg, aggalt, min, max, minalt, maxalt, io, ioalt,
311 (unsigned long long) rs->min_run[i],
312 (unsigned long long) rs->max_run[i]);
325 void stat_calc_dist(uint64_t *map, unsigned long total, double *io_u_dist)
330 * Do depth distribution calculations
332 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
334 io_u_dist[i] = (double) map[i] / (double) total;
335 io_u_dist[i] *= 100.0;
336 if (io_u_dist[i] < 0.1 && map[i])
343 static void stat_calc_lat(struct thread_stat *ts, double *dst,
344 uint64_t *src, int nr)
346 unsigned long total = ddir_rw_sum(ts->total_io_u);
350 * Do latency distribution calculations
352 for (i = 0; i < nr; i++) {
354 dst[i] = (double) src[i] / (double) total;
356 if (dst[i] < 0.01 && src[i])
364 * To keep the terse format unaltered, add all of the ns latency
365 * buckets to the first us latency bucket
367 static void stat_calc_lat_nu(struct thread_stat *ts, double *io_u_lat_u)
369 unsigned long ntotal = 0, total = ddir_rw_sum(ts->total_io_u);
372 stat_calc_lat(ts, io_u_lat_u, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
374 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
375 ntotal += ts->io_u_lat_n[i];
377 io_u_lat_u[0] += 100.0 * (double) ntotal / (double) total;
380 void stat_calc_lat_n(struct thread_stat *ts, double *io_u_lat)
382 stat_calc_lat(ts, io_u_lat, ts->io_u_lat_n, FIO_IO_U_LAT_N_NR);
385 void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
387 stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
390 void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
392 stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
395 static void display_lat(const char *name, unsigned long long min,
396 unsigned long long max, double mean, double dev,
397 struct buf_output *out)
399 const char *base = "(nsec)";
402 if (nsec_to_msec(&min, &max, &mean, &dev))
404 else if (nsec_to_usec(&min, &max, &mean, &dev))
407 minp = num2str(min, 6, 1, 0, N2S_NONE);
408 maxp = num2str(max, 6, 1, 0, N2S_NONE);
410 log_buf(out, " %s %s: min=%s, max=%s, avg=%5.02f,"
411 " stdev=%5.02f\n", name, base, minp, maxp, mean, dev);
417 static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
418 int ddir, struct buf_output *out)
421 unsigned long long min, max, bw, iops;
423 char *io_p, *bw_p, *bw_p_alt, *iops_p, *post_st = NULL;
426 if (ddir_sync(ddir)) {
427 if (calc_lat(&ts->sync_stat, &min, &max, &mean, &dev)) {
428 log_buf(out, " %s:\n", "fsync/fdatasync/sync_file_range");
429 display_lat(io_ddir_name(ddir), min, max, mean, dev, out);
430 show_clat_percentiles(ts->io_u_sync_plat,
431 ts->sync_stat.samples,
433 ts->percentile_precision,
434 io_ddir_name(ddir), out);
439 assert(ddir_rw(ddir));
441 if (!ts->runtime[ddir])
444 i2p = is_power_of_2(rs->kb_base);
445 runt = ts->runtime[ddir];
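/* runtime is stored in msec, so the 1000 factors below give per-second rates */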
447 bw = (1000 * ts->io_bytes[ddir]) / runt;
448 io_p = num2str(ts->io_bytes[ddir], ts->sig_figs, 1, i2p, N2S_BYTE);
449 bw_p = num2str(bw, ts->sig_figs, 1, i2p, ts->unit_base);
450 bw_p_alt = num2str(bw, ts->sig_figs, 1, !i2p, ts->unit_base);
452 iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
453 iops_p = num2str(iops, ts->sig_figs, 1, 0, N2S_NONE);
454 if (ddir == DDIR_WRITE)
455 post_st = zbd_write_status(ts);
456 else if (ddir == DDIR_READ && ts->cachehit && ts->cachemiss) {
460 total = ts->cachehit + ts->cachemiss;
461 hit = (double) ts->cachehit / (double) total;
463 if (asprintf(&post_st, "; Cachehit=%0.2f%%", hit) < 0)
467 log_buf(out, " %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)%s\n",
468 rs->unified_rw_rep ? "mixed" : io_ddir_name(ddir),
469 iops_p, bw_p, bw_p_alt, io_p,
470 (unsigned long long) ts->runtime[ddir],
479 if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
480 display_lat("slat", min, max, mean, dev, out);
481 if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
482 display_lat("clat", min, max, mean, dev, out);
483 if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
484 display_lat(" lat", min, max, mean, dev, out);
485 if (calc_lat(&ts->clat_high_prio_stat[ddir], &min, &max, &mean, &dev))
486 display_lat(ts->lat_percentiles ? "prio_lat" : "prio_clat",
487 min, max, mean, dev, out);
489 if (ts->slat_percentiles && ts->slat_stat[ddir].samples > 0)
490 show_clat_percentiles(ts->io_u_plat[FIO_SLAT][ddir],
491 ts->slat_stat[ddir].samples,
493 ts->percentile_precision, "slat", out);
494 if (ts->clat_percentiles && ts->clat_stat[ddir].samples > 0)
495 show_clat_percentiles(ts->io_u_plat[FIO_CLAT][ddir],
496 ts->clat_stat[ddir].samples,
498 ts->percentile_precision, "clat", out);
499 if (ts->lat_percentiles && ts->lat_stat[ddir].samples > 0)
500 show_clat_percentiles(ts->io_u_plat[FIO_LAT][ddir],
501 ts->lat_stat[ddir].samples,
503 ts->percentile_precision, "lat", out);
505 if (ts->clat_percentiles || ts->lat_percentiles) {
506 const char *name = ts->lat_percentiles ? "lat" : "clat";
510 if (ts->lat_percentiles)
511 samples = ts->lat_stat[ddir].samples;
513 samples = ts->clat_stat[ddir].samples;
515 /* Only print this if some high and low priority stats were collected */
516 if (ts->clat_high_prio_stat[ddir].samples > 0 &&
517 ts->clat_prio_stat[ddir].samples > 0)
519 sprintf(prio_name, "high prio (%.2f%%) %s",
520 100. * (double) ts->clat_high_prio_stat[ddir].samples / (double) samples,
522 show_clat_percentiles(ts->io_u_plat_high_prio[ddir],
523 ts->clat_high_prio_stat[ddir].samples,
525 ts->percentile_precision, prio_name, out);
527 sprintf(prio_name, "low prio (%.2f%%) %s",
528 100. * (double) ts->clat_prio_stat[ddir].samples / (double) samples,
530 show_clat_percentiles(ts->io_u_plat_prio[ddir],
531 ts->clat_prio_stat[ddir].samples,
533 ts->percentile_precision, prio_name, out);
537 if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
538 double p_of_agg = 100.0, fkb_base = (double)rs->kb_base;
541 if ((rs->unit_base == 1) && i2p)
543 else if (rs->unit_base == 1)
551 p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
552 if (p_of_agg > 100.0)
556 if (rs->unit_base == 1) {
563 if (mean > fkb_base * fkb_base) {
568 bw_str = (rs->unit_base == 1 ? "Mibit" : "MiB");
571 log_buf(out, " bw (%5s/s): min=%5llu, max=%5llu, per=%3.2f%%, "
572 "avg=%5.02f, stdev=%5.02f, samples=%" PRIu64 "\n",
573 bw_str, min, max, p_of_agg, mean, dev,
574 (&ts->bw_stat[ddir])->samples);
576 if (calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev)) {
577 log_buf(out, " iops : min=%5llu, max=%5llu, "
578 "avg=%5.02f, stdev=%5.02f, samples=%" PRIu64 "\n",
579 min, max, mean, dev, (&ts->iops_stat[ddir])->samples);
583 static bool show_lat(double *io_u_lat, int nr, const char **ranges,
584 const char *msg, struct buf_output *out)
586 bool new_line = true, shown = false;
589 for (i = 0; i < nr; i++) {
590 if (io_u_lat[i] <= 0.0)
596 log_buf(out, " lat (%s) : ", msg);
602 log_buf(out, "%s%3.2f%%", ranges[i], io_u_lat[i]);
614 static void show_lat_n(double *io_u_lat_n, struct buf_output *out)
616 const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
617 "250=", "500=", "750=", "1000=", };
619 show_lat(io_u_lat_n, FIO_IO_U_LAT_N_NR, ranges, "nsec", out);
622 static void show_lat_u(double *io_u_lat_u, struct buf_output *out)
624 const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
625 "250=", "500=", "750=", "1000=", };
627 show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec", out);
630 static void show_lat_m(double *io_u_lat_m, struct buf_output *out)
632 const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
633 "250=", "500=", "750=", "1000=", "2000=",
636 show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec", out);
639 static void show_latencies(struct thread_stat *ts, struct buf_output *out)
641 double io_u_lat_n[FIO_IO_U_LAT_N_NR];
642 double io_u_lat_u[FIO_IO_U_LAT_U_NR];
643 double io_u_lat_m[FIO_IO_U_LAT_M_NR];
645 stat_calc_lat_n(ts, io_u_lat_n);
646 stat_calc_lat_u(ts, io_u_lat_u);
647 stat_calc_lat_m(ts, io_u_lat_m);
649 show_lat_n(io_u_lat_n, out);
650 show_lat_u(io_u_lat_u, out);
651 show_lat_m(io_u_lat_m, out);
654 static int block_state_category(int block_state)
656 switch (block_state) {
657 case BLOCK_STATE_UNINIT:
659 case BLOCK_STATE_TRIMMED:
660 case BLOCK_STATE_WRITTEN:
662 case BLOCK_STATE_WRITE_FAILURE:
663 case BLOCK_STATE_TRIM_FAILURE:
666 /* Silence compile warning on some BSDs and have a return */
672 static int compare_block_infos(const void *bs1, const void *bs2)
674 uint64_t block1 = *(uint64_t *)bs1;
675 uint64_t block2 = *(uint64_t *)bs2;
676 int state1 = BLOCK_INFO_STATE(block1);
677 int state2 = BLOCK_INFO_STATE(block2);
678 int bscat1 = block_state_category(state1);
679 int bscat2 = block_state_category(state2);
680 int cycles1 = BLOCK_INFO_TRIMS(block1);
681 int cycles2 = BLOCK_INFO_TRIMS(block2);
688 if (cycles1 < cycles2)
690 if (cycles1 > cycles2)
698 assert(block1 == block2);
702 static int calc_block_percentiles(int nr_block_infos, uint32_t *block_infos,
703 fio_fp64_t *plist, unsigned int **percentiles,
709 qsort(block_infos, nr_block_infos, sizeof(uint32_t), compare_block_infos);
711 while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
718 * Sort the percentile list. Note that it may already be sorted if
719 * we are using the default values, but since it's a short list this
720 * isn't a worry. Also note that this does not work for NaN values.
723 qsort(plist, len, sizeof(plist[0]), double_cmp);
725 /* Start only after the uninit entries end */
727 nr_uninit < nr_block_infos
728 && BLOCK_INFO_STATE(block_infos[nr_uninit]) == BLOCK_STATE_UNINIT;
732 if (nr_uninit == nr_block_infos)
735 *percentiles = calloc(len, sizeof(**percentiles));
737 for (i = 0; i < len; i++) {
738 int idx = (plist[i].u.f * (nr_block_infos - nr_uninit) / 100)
740 (*percentiles)[i] = BLOCK_INFO_TRIMS(block_infos[idx]);
743 memset(types, 0, sizeof(*types) * BLOCK_STATE_COUNT);
744 for (i = 0; i < nr_block_infos; i++)
745 types[BLOCK_INFO_STATE(block_infos[i])]++;
750 static const char *block_state_names[] = {
751 [BLOCK_STATE_UNINIT] = "unwritten",
752 [BLOCK_STATE_TRIMMED] = "trimmed",
753 [BLOCK_STATE_WRITTEN] = "written",
754 [BLOCK_STATE_TRIM_FAILURE] = "trim failure",
755 [BLOCK_STATE_WRITE_FAILURE] = "write failure",
758 static void show_block_infos(int nr_block_infos, uint32_t *block_infos,
759 fio_fp64_t *plist, struct buf_output *out)
762 unsigned int *percentiles = NULL;
763 unsigned int block_state_counts[BLOCK_STATE_COUNT];
765 len = calc_block_percentiles(nr_block_infos, block_infos, plist,
766 &percentiles, block_state_counts);
768 log_buf(out, " block lifetime percentiles :\n |");
770 for (i = 0; i < len; i++) {
771 uint32_t block_info = percentiles[i];
772 #define LINE_LENGTH 75
773 char str[LINE_LENGTH];
774 int strln = snprintf(str, LINE_LENGTH, " %3.2fth=%u%c",
775 plist[i].u.f, block_info,
776 i == len - 1 ? '\n' : ',');
777 assert(strln < LINE_LENGTH);
778 if (pos + strln > LINE_LENGTH) {
780 log_buf(out, "\n |");
782 log_buf(out, "%s", str);
789 log_buf(out, " states :");
790 for (i = 0; i < BLOCK_STATE_COUNT; i++)
791 log_buf(out, " %s=%u%c",
792 block_state_names[i], block_state_counts[i],
793 i == BLOCK_STATE_COUNT - 1 ? '\n' : ',');
796 static void show_ss_normal(struct thread_stat *ts, struct buf_output *out)
798 char *p1, *p1alt, *p2;
799 unsigned long long bw_mean, iops_mean;
800 const int i2p = is_power_of_2(ts->kb_base);
805 bw_mean = steadystate_bw_mean(ts);
806 iops_mean = steadystate_iops_mean(ts);
808 p1 = num2str(bw_mean / ts->kb_base, ts->sig_figs, ts->kb_base, i2p, ts->unit_base);
809 p1alt = num2str(bw_mean / ts->kb_base, ts->sig_figs, ts->kb_base, !i2p, ts->unit_base);
810 p2 = num2str(iops_mean, ts->sig_figs, 1, 0, N2S_NONE);
812 log_buf(out, " steadystate : attained=%s, bw=%s (%s), iops=%s, %s%s=%.3f%s\n",
813 ts->ss_state & FIO_SS_ATTAINED ? "yes" : "no",
815 ts->ss_state & FIO_SS_IOPS ? "iops" : "bw",
816 ts->ss_state & FIO_SS_SLOPE ? " slope": " mean dev",
817 ts->ss_criterion.u.f,
818 ts->ss_state & FIO_SS_PCT ? "%" : "");
825 static void show_agg_stats(struct disk_util_agg *agg, int terse,
826 struct buf_output *out)
828 if (!agg->slavecount)
832 log_buf(out, ", aggrios=%llu/%llu, aggrmerge=%llu/%llu, "
833 "aggrticks=%llu/%llu, aggrin_queue=%llu, "
835 (unsigned long long) agg->ios[0] / agg->slavecount,
836 (unsigned long long) agg->ios[1] / agg->slavecount,
837 (unsigned long long) agg->merges[0] / agg->slavecount,
838 (unsigned long long) agg->merges[1] / agg->slavecount,
839 (unsigned long long) agg->ticks[0] / agg->slavecount,
840 (unsigned long long) agg->ticks[1] / agg->slavecount,
841 (unsigned long long) agg->time_in_queue / agg->slavecount,
844 log_buf(out, ";slaves;%llu;%llu;%llu;%llu;%llu;%llu;%llu;%3.2f%%",
845 (unsigned long long) agg->ios[0] / agg->slavecount,
846 (unsigned long long) agg->ios[1] / agg->slavecount,
847 (unsigned long long) agg->merges[0] / agg->slavecount,
848 (unsigned long long) agg->merges[1] / agg->slavecount,
849 (unsigned long long) agg->ticks[0] / agg->slavecount,
850 (unsigned long long) agg->ticks[1] / agg->slavecount,
851 (unsigned long long) agg->time_in_queue / agg->slavecount,
856 static void aggregate_slaves_stats(struct disk_util *masterdu)
858 struct disk_util_agg *agg = &masterdu->agg;
859 struct disk_util_stat *dus;
860 struct flist_head *entry;
861 struct disk_util *slavedu;
864 flist_for_each(entry, &masterdu->slaves) {
865 slavedu = flist_entry(entry, struct disk_util, slavelist);
867 agg->ios[0] += dus->s.ios[0];
868 agg->ios[1] += dus->s.ios[1];
869 agg->merges[0] += dus->s.merges[0];
870 agg->merges[1] += dus->s.merges[1];
871 agg->sectors[0] += dus->s.sectors[0];
872 agg->sectors[1] += dus->s.sectors[1];
873 agg->ticks[0] += dus->s.ticks[0];
874 agg->ticks[1] += dus->s.ticks[1];
875 agg->time_in_queue += dus->s.time_in_queue;
878 util = (double) (100 * dus->s.io_ticks / (double) slavedu->dus.s.msec);
879 /* System utilization is taken to be the utilization of
880 * the busiest (most utilized) component.
882 if (util > agg->max_util.u.f)
883 agg->max_util.u.f = util;
887 if (agg->max_util.u.f > 100.0)
888 agg->max_util.u.f = 100.0;
891 void print_disk_util(struct disk_util_stat *dus, struct disk_util_agg *agg,
892 int terse, struct buf_output *out)
897 util = (double) 100 * dus->s.io_ticks / (double) dus->s.msec;
905 log_buf(out, " %s: ios=%llu/%llu, merge=%llu/%llu, "
906 "ticks=%llu/%llu, in_queue=%llu, util=%3.2f%%",
908 (unsigned long long) dus->s.ios[0],
909 (unsigned long long) dus->s.ios[1],
910 (unsigned long long) dus->s.merges[0],
911 (unsigned long long) dus->s.merges[1],
912 (unsigned long long) dus->s.ticks[0],
913 (unsigned long long) dus->s.ticks[1],
914 (unsigned long long) dus->s.time_in_queue,
917 log_buf(out, ";%s;%llu;%llu;%llu;%llu;%llu;%llu;%llu;%3.2f%%",
919 (unsigned long long) dus->s.ios[0],
920 (unsigned long long) dus->s.ios[1],
921 (unsigned long long) dus->s.merges[0],
922 (unsigned long long) dus->s.merges[1],
923 (unsigned long long) dus->s.ticks[0],
924 (unsigned long long) dus->s.ticks[1],
925 (unsigned long long) dus->s.time_in_queue,
930 * If the device has slaves, aggregate the stats for
931 * those slave devices also.
933 show_agg_stats(agg, terse, out);
939 void json_array_add_disk_util(struct disk_util_stat *dus,
940 struct disk_util_agg *agg, struct json_array *array)
942 struct json_object *obj;
946 util = (double) 100 * dus->s.io_ticks / (double) dus->s.msec;
950 obj = json_create_object();
951 json_array_add_value_object(array, obj);
953 json_object_add_value_string(obj, "name", dus->name);
954 json_object_add_value_int(obj, "read_ios", dus->s.ios[0]);
955 json_object_add_value_int(obj, "write_ios", dus->s.ios[1]);
956 json_object_add_value_int(obj, "read_merges", dus->s.merges[0]);
957 json_object_add_value_int(obj, "write_merges", dus->s.merges[1]);
958 json_object_add_value_int(obj, "read_ticks", dus->s.ticks[0]);
959 json_object_add_value_int(obj, "write_ticks", dus->s.ticks[1]);
960 json_object_add_value_int(obj, "in_queue", dus->s.time_in_queue);
961 json_object_add_value_float(obj, "util", util);
964 * If the device has slaves, aggregate the stats for
965 * those slave devices also.
967 if (!agg->slavecount)
969 json_object_add_value_int(obj, "aggr_read_ios",
970 agg->ios[0] / agg->slavecount);
971 json_object_add_value_int(obj, "aggr_write_ios",
972 agg->ios[1] / agg->slavecount);
973 json_object_add_value_int(obj, "aggr_read_merges",
974 agg->merges[0] / agg->slavecount);
975 json_object_add_value_int(obj, "aggr_write_merge",
976 agg->merges[1] / agg->slavecount);
977 json_object_add_value_int(obj, "aggr_read_ticks",
978 agg->ticks[0] / agg->slavecount);
979 json_object_add_value_int(obj, "aggr_write_ticks",
980 agg->ticks[1] / agg->slavecount);
981 json_object_add_value_int(obj, "aggr_in_queue",
982 agg->time_in_queue / agg->slavecount);
983 json_object_add_value_float(obj, "aggr_util", agg->max_util.u.f);
986 static void json_object_add_disk_utils(struct json_object *obj,
987 struct flist_head *head)
989 struct json_array *array = json_create_array();
990 struct flist_head *entry;
991 struct disk_util *du;
993 json_object_add_value_array(obj, "disk_util", array);
995 flist_for_each(entry, head) {
996 du = flist_entry(entry, struct disk_util, list);
998 aggregate_slaves_stats(du);
999 json_array_add_disk_util(&du->dus, &du->agg, array);
1003 void show_disk_util(int terse, struct json_object *parent,
1004 struct buf_output *out)
1006 struct flist_head *entry;
1007 struct disk_util *du;
1010 if (!is_running_backend())
1013 if (flist_empty(&disk_list)) {
1017 if ((output_format & FIO_OUTPUT_JSON) && parent)
1022 if (!terse && !do_json)
1023 log_buf(out, "\nDisk stats (read/write):\n");
1026 json_object_add_disk_utils(parent, &disk_list);
1027 else if (output_format & ~(FIO_OUTPUT_JSON | FIO_OUTPUT_JSON_PLUS)) {
1028 flist_for_each(entry, &disk_list) {
1029 du = flist_entry(entry, struct disk_util, list);
1031 aggregate_slaves_stats(du);
1032 print_disk_util(&du->dus, &du->agg, terse, out);
1037 static void show_thread_status_normal(struct thread_stat *ts,
1038 struct group_run_stats *rs,
1039 struct buf_output *out)
1041 double usr_cpu, sys_cpu;
1042 unsigned long runtime;
1043 double io_u_dist[FIO_IO_U_MAP_NR];
1047 if (!ddir_rw_sum(ts->io_bytes) && !ddir_rw_sum(ts->total_io_u))
1050 memset(time_buf, 0, sizeof(time_buf));
1053 os_ctime_r((const time_t *) &time_p, time_buf, sizeof(time_buf));
1056 log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d: pid=%d: %s",
1057 ts->name, ts->groupid, ts->members,
1058 ts->error, (int) ts->pid, time_buf);
1060 log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d: %s",
1061 ts->name, ts->groupid, ts->members,
1062 ts->error, ts->verror, (int) ts->pid,
1066 if (strlen(ts->description))
1067 log_buf(out, " Description : [%s]\n", ts->description);
1069 if (ts->io_bytes[DDIR_READ])
1070 show_ddir_status(rs, ts, DDIR_READ, out);
1071 if (ts->io_bytes[DDIR_WRITE])
1072 show_ddir_status(rs, ts, DDIR_WRITE, out);
1073 if (ts->io_bytes[DDIR_TRIM])
1074 show_ddir_status(rs, ts, DDIR_TRIM, out);
1076 show_latencies(ts, out);
1078 if (ts->sync_stat.samples)
1079 show_ddir_status(rs, ts, DDIR_SYNC, out);
1081 runtime = ts->total_run_time;
1083 double runt = (double) runtime;
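/* usr_time, sys_time and total_run_time are all in msec, so these are straight percentages */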
1085 usr_cpu = (double) ts->usr_time * 100 / runt;
1086 sys_cpu = (double) ts->sys_time * 100 / runt;
1092 log_buf(out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%llu,"
1093 " majf=%llu, minf=%llu\n", usr_cpu, sys_cpu,
1094 (unsigned long long) ts->ctx,
1095 (unsigned long long) ts->majf,
1096 (unsigned long long) ts->minf);
1098 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
1099 log_buf(out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
1100 " 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
1101 io_u_dist[1], io_u_dist[2],
1102 io_u_dist[3], io_u_dist[4],
1103 io_u_dist[5], io_u_dist[6]);
1105 stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
1106 log_buf(out, " submit : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
1107 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
1108 io_u_dist[1], io_u_dist[2],
1109 io_u_dist[3], io_u_dist[4],
1110 io_u_dist[5], io_u_dist[6]);
1111 stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
1112 log_buf(out, " complete : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
1113 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
1114 io_u_dist[1], io_u_dist[2],
1115 io_u_dist[3], io_u_dist[4],
1116 io_u_dist[5], io_u_dist[6]);
1117 log_buf(out, " issued rwts: total=%llu,%llu,%llu,%llu"
1118 " short=%llu,%llu,%llu,0"
1119 " dropped=%llu,%llu,%llu,0\n",
1120 (unsigned long long) ts->total_io_u[0],
1121 (unsigned long long) ts->total_io_u[1],
1122 (unsigned long long) ts->total_io_u[2],
1123 (unsigned long long) ts->total_io_u[3],
1124 (unsigned long long) ts->short_io_u[0],
1125 (unsigned long long) ts->short_io_u[1],
1126 (unsigned long long) ts->short_io_u[2],
1127 (unsigned long long) ts->drop_io_u[0],
1128 (unsigned long long) ts->drop_io_u[1],
1129 (unsigned long long) ts->drop_io_u[2]);
1130 if (ts->continue_on_error) {
1131 log_buf(out, " errors : total=%llu, first_error=%d/<%s>\n",
1132 (unsigned long long)ts->total_err_count,
1134 strerror(ts->first_error));
1136 if (ts->latency_depth) {
1137 log_buf(out, " latency : target=%llu, window=%llu, percentile=%.2f%%, depth=%u\n",
1138 (unsigned long long)ts->latency_target,
1139 (unsigned long long)ts->latency_window,
1140 ts->latency_percentile.u.f,
1144 if (ts->nr_block_infos)
1145 show_block_infos(ts->nr_block_infos, ts->block_infos,
1146 ts->percentile_list, out);
1149 show_ss_normal(ts, out);
1152 static void show_ddir_status_terse(struct thread_stat *ts,
1153 struct group_run_stats *rs, int ddir,
1154 int ver, struct buf_output *out)
1156 unsigned long long min, max, minv, maxv, bw, iops;
1157 unsigned long long *ovals = NULL;
1162 assert(ddir_rw(ddir));
1165 if (ts->runtime[ddir]) {
1166 uint64_t runt = ts->runtime[ddir];
1168 bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; /* KiB/s */
1169 iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
1172 log_buf(out, ";%llu;%llu;%llu;%llu",
1173 (unsigned long long) ts->io_bytes[ddir] >> 10, bw, iops,
1174 (unsigned long long) ts->runtime[ddir]);
1176 if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
1177 log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
1179 log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
1181 if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
1182 log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
1184 log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
1186 if (ts->lat_percentiles)
1187 len = calc_clat_percentiles(ts->io_u_plat[FIO_LAT][ddir],
1188 ts->lat_stat[ddir].samples,
1189 ts->percentile_list, &ovals, &maxv,
1191 else if (ts->clat_percentiles)
1192 len = calc_clat_percentiles(ts->io_u_plat[FIO_CLAT][ddir],
1193 ts->clat_stat[ddir].samples,
1194 ts->percentile_list, &ovals, &maxv,
1199 for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
1201 log_buf(out, ";0%%=0");
1204 log_buf(out, ";%f%%=%llu", ts->percentile_list[i].u.f, ovals[i]/1000);
1207 if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
1208 log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
1210 log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
1214 bw_stat = calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev);
1216 double p_of_agg = 100.0;
1218 if (rs->agg[ddir]) {
1219 p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
1220 if (p_of_agg > 100.0)
1224 log_buf(out, ";%llu;%llu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
1226 log_buf(out, ";%llu;%llu;%f%%;%f;%f", 0ULL, 0ULL, 0.0, 0.0, 0.0);
1230 log_buf(out, ";%" PRIu64, (&ts->bw_stat[ddir])->samples);
1232 log_buf(out, ";%lu", 0UL);
1234 if (calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev))
1235 log_buf(out, ";%llu;%llu;%f;%f;%" PRIu64, min, max,
1236 mean, dev, (&ts->iops_stat[ddir])->samples);
1238 log_buf(out, ";%llu;%llu;%f;%f;%lu", 0ULL, 0ULL, 0.0, 0.0, 0UL);
1242 static struct json_object *add_ddir_lat_json(struct thread_stat *ts, uint32_t percentiles,
1243 struct io_stat *lat_stat, uint64_t *io_u_plat)
1247 unsigned int i, len;
1248 struct json_object *lat_object, *percentile_object, *clat_bins_object;
1249 unsigned long long min, max, maxv, minv, *ovals = NULL;
1251 if (!calc_lat(lat_stat, &min, &max, &mean, &dev)) {
1255 lat_object = json_create_object();
1256 json_object_add_value_int(lat_object, "min", min);
1257 json_object_add_value_int(lat_object, "max", max);
1258 json_object_add_value_float(lat_object, "mean", mean);
1259 json_object_add_value_float(lat_object, "stddev", dev);
1260 json_object_add_value_int(lat_object, "N", lat_stat->samples);
1262 if (percentiles && lat_stat->samples) {
1263 len = calc_clat_percentiles(io_u_plat, lat_stat->samples,
1264 ts->percentile_list, &ovals, &maxv, &minv);
1266 if (len > FIO_IO_U_LIST_MAX_LEN)
1267 len = FIO_IO_U_LIST_MAX_LEN;
1269 percentile_object = json_create_object();
1270 json_object_add_value_object(lat_object, "percentile", percentile_object);
1271 for (i = 0; i < len; i++) {
1272 snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f);
1273 json_object_add_value_int(percentile_object, buf, ovals[i]);
1277 if (output_format & FIO_OUTPUT_JSON_PLUS) {
1278 clat_bins_object = json_create_object();
1279 json_object_add_value_object(lat_object, "bins", clat_bins_object);
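/*
 * One entry per histogram bucket: the key is the bucket's latency value
 * in nanoseconds (from plat_idx_to_val()), the value its raw sample count.
 */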
1281 for(i = 0; i < FIO_IO_U_PLAT_NR; i++)
1283 snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
1284 json_object_add_value_int(clat_bins_object, buf, io_u_plat[i]);
1292 static void add_ddir_status_json(struct thread_stat *ts,
1293 struct group_run_stats *rs, int ddir, struct json_object *parent)
1295 unsigned long long min, max;
1296 unsigned long long bw_bytes, bw;
1297 double mean, dev, iops;
1298 struct json_object *dir_object, *tmp_object;
1299 double p_of_agg = 100.0;
1301 assert(ddir_rw(ddir) || ddir_sync(ddir));
1303 if (ts->unified_rw_rep && ddir != DDIR_READ)
1306 dir_object = json_create_object();
1307 json_object_add_value_object(parent,
1308 ts->unified_rw_rep ? "mixed" : io_ddir_name(ddir), dir_object);
1310 if (ddir_rw(ddir)) {
1314 if (ts->runtime[ddir]) {
1315 uint64_t runt = ts->runtime[ddir];
1317 bw_bytes = ((1000 * ts->io_bytes[ddir]) / runt); /* Bytes/s */
1318 bw = bw_bytes / 1024; /* KiB/s */
1319 iops = (1000.0 * (uint64_t) ts->total_io_u[ddir]) / runt;
1322 json_object_add_value_int(dir_object, "io_bytes", ts->io_bytes[ddir]);
1323 json_object_add_value_int(dir_object, "io_kbytes", ts->io_bytes[ddir] >> 10);
1324 json_object_add_value_int(dir_object, "bw_bytes", bw_bytes);
1325 json_object_add_value_int(dir_object, "bw", bw);
1326 json_object_add_value_float(dir_object, "iops", iops);
1327 json_object_add_value_int(dir_object, "runtime", ts->runtime[ddir]);
1328 json_object_add_value_int(dir_object, "total_ios", ts->total_io_u[ddir]);
1329 json_object_add_value_int(dir_object, "short_ios", ts->short_io_u[ddir]);
1330 json_object_add_value_int(dir_object, "drop_ios", ts->drop_io_u[ddir]);
1332 tmp_object = add_ddir_lat_json(ts, ts->slat_percentiles,
1333 &ts->slat_stat[ddir], ts->io_u_plat[FIO_SLAT][ddir]);
1334 json_object_add_value_object(dir_object, "slat_ns", tmp_object);
1336 tmp_object = add_ddir_lat_json(ts, ts->clat_percentiles,
1337 &ts->clat_stat[ddir], ts->io_u_plat[FIO_CLAT][ddir]);
1338 json_object_add_value_object(dir_object, "clat_ns", tmp_object);
1340 tmp_object = add_ddir_lat_json(ts, ts->lat_percentiles,
1341 &ts->lat_stat[ddir], ts->io_u_plat[FIO_LAT][ddir]);
1342 json_object_add_value_object(dir_object, "lat_ns", tmp_object);
1344 json_object_add_value_int(dir_object, "total_ios", ts->total_io_u[DDIR_SYNC]);
1345 tmp_object = add_ddir_lat_json(ts, ts->lat_percentiles | ts->clat_percentiles,
1346 &ts->sync_stat, ts->io_u_sync_plat);
1347 json_object_add_value_object(dir_object, "lat_ns", tmp_object);
1353 /* Only print PRIO latencies if some high priority samples were gathered */
1354 if (ts->clat_high_prio_stat[ddir].samples > 0) {
1355 const char *high, *low;
1357 if (ts->lat_percentiles) {
1359 low = "lat_low_prio";
1362 low = "clat_low_prio";
1365 tmp_object = add_ddir_lat_json(ts, ts->clat_percentiles | ts->lat_percentiles,
1366 &ts->clat_high_prio_stat[ddir], ts->io_u_plat_high_prio[ddir]);
1367 json_object_add_value_object(dir_object, high, tmp_object);
1369 tmp_object = add_ddir_lat_json(ts, ts->clat_percentiles | ts->lat_percentiles,
1370 &ts->clat_prio_stat[ddir], ts->io_u_plat_prio[ddir]);
1371 json_object_add_value_object(dir_object, low, tmp_object);
1374 if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
1375 if (rs->agg[ddir]) {
1376 p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
1377 if (p_of_agg > 100.0)
1382 p_of_agg = mean = dev = 0.0;
1385 json_object_add_value_int(dir_object, "bw_min", min);
1386 json_object_add_value_int(dir_object, "bw_max", max);
1387 json_object_add_value_float(dir_object, "bw_agg", p_of_agg);
1388 json_object_add_value_float(dir_object, "bw_mean", mean);
1389 json_object_add_value_float(dir_object, "bw_dev", dev);
1390 json_object_add_value_int(dir_object, "bw_samples",
1391 (&ts->bw_stat[ddir])->samples);
1393 if (!calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev)) {
1397 json_object_add_value_int(dir_object, "iops_min", min);
1398 json_object_add_value_int(dir_object, "iops_max", max);
1399 json_object_add_value_float(dir_object, "iops_mean", mean);
1400 json_object_add_value_float(dir_object, "iops_stddev", dev);
1401 json_object_add_value_int(dir_object, "iops_samples",
1402 (&ts->iops_stat[ddir])->samples);
1404 if (ts->cachehit + ts->cachemiss) {
1408 total = ts->cachehit + ts->cachemiss;
1409 hit = (double) ts->cachehit / (double) total;
1411 json_object_add_value_float(dir_object, "cachehit", hit);
1415 static void show_thread_status_terse_all(struct thread_stat *ts,
1416 struct group_run_stats *rs, int ver,
1417 struct buf_output *out)
1419 double io_u_dist[FIO_IO_U_MAP_NR];
1420 double io_u_lat_u[FIO_IO_U_LAT_U_NR];
1421 double io_u_lat_m[FIO_IO_U_LAT_M_NR];
1422 double usr_cpu, sys_cpu;
1427 log_buf(out, "2;%s;%d;%d", ts->name, ts->groupid, ts->error);
1429 log_buf(out, "%d;%s;%s;%d;%d", ver, fio_version_string,
1430 ts->name, ts->groupid, ts->error);
1432 /* Log Read Status */
1433 show_ddir_status_terse(ts, rs, DDIR_READ, ver, out);
1434 /* Log Write Status */
1435 show_ddir_status_terse(ts, rs, DDIR_WRITE, ver, out);
1436 /* Log Trim Status */
1437 if (ver == 2 || ver == 4 || ver == 5)
1438 show_ddir_status_terse(ts, rs, DDIR_TRIM, ver, out);
1441 if (ts->total_run_time) {
1442 double runt = (double) ts->total_run_time;
1444 usr_cpu = (double) ts->usr_time * 100 / runt;
1445 sys_cpu = (double) ts->sys_time * 100 / runt;
1451 log_buf(out, ";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
1452 (unsigned long long) ts->ctx,
1453 (unsigned long long) ts->majf,
1454 (unsigned long long) ts->minf);
1456 /* Calc % distribution of IO depths, microsecond and millisecond latency */
1457 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
1458 stat_calc_lat_nu(ts, io_u_lat_u);
1459 stat_calc_lat_m(ts, io_u_lat_m);
1461 /* Only show the 7 fixed I/O depth levels */
1462 log_buf(out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
1463 io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
1464 io_u_dist[4], io_u_dist[5], io_u_dist[6]);
1466 /* Microsecond latency */
1467 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
1468 log_buf(out, ";%3.2f%%", io_u_lat_u[i]);
1469 /* Millisecond latency */
1470 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
1471 log_buf(out, ";%3.2f%%", io_u_lat_m[i]);
1473 /* disk util stats, if any */
1474 if (ver >= 3 && is_running_backend())
1475 show_disk_util(1, NULL, out);
1477 /* Additional output if continue_on_error is set - default off */
1478 if (ts->continue_on_error)
1479 log_buf(out, ";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);
1481 /* Additional output if description is set */
1482 if (strlen(ts->description)) {
1485 log_buf(out, ";%s", ts->description);
1491 static void json_add_job_opts(struct json_object *root, const char *name,
1492 struct flist_head *opt_list)
1494 struct json_object *dir_object;
1495 struct flist_head *entry;
1496 struct print_option *p;
1498 if (flist_empty(opt_list))
1501 dir_object = json_create_object();
1502 json_object_add_value_object(root, name, dir_object);
1504 flist_for_each(entry, opt_list) {
1505 const char *pos = "";
1507 p = flist_entry(entry, struct print_option, list);
1510 json_object_add_value_string(dir_object, p->name, pos);
1514 static struct json_object *show_thread_status_json(struct thread_stat *ts,
1515 struct group_run_stats *rs,
1516 struct flist_head *opt_list)
1518 struct json_object *root, *tmp;
1519 struct jobs_eta *je;
1520 double io_u_dist[FIO_IO_U_MAP_NR];
1521 double io_u_lat_n[FIO_IO_U_LAT_N_NR];
1522 double io_u_lat_u[FIO_IO_U_LAT_U_NR];
1523 double io_u_lat_m[FIO_IO_U_LAT_M_NR];
1524 double usr_cpu, sys_cpu;
1528 root = json_create_object();
1529 json_object_add_value_string(root, "jobname", ts->name);
1530 json_object_add_value_int(root, "groupid", ts->groupid);
1531 json_object_add_value_int(root, "error", ts->error);
1534 je = get_jobs_eta(true, &size);
1536 json_object_add_value_int(root, "eta", je->eta_sec);
1537 json_object_add_value_int(root, "elapsed", je->elapsed_sec);
1541 json_add_job_opts(root, "job options", opt_list);
1543 add_ddir_status_json(ts, rs, DDIR_READ, root);
1544 add_ddir_status_json(ts, rs, DDIR_WRITE, root);
1545 add_ddir_status_json(ts, rs, DDIR_TRIM, root);
1546 add_ddir_status_json(ts, rs, DDIR_SYNC, root);
1549 if (ts->total_run_time) {
1550 double runt = (double) ts->total_run_time;
1552 usr_cpu = (double) ts->usr_time * 100 / runt;
1553 sys_cpu = (double) ts->sys_time * 100 / runt;
1558 json_object_add_value_int(root, "job_runtime", ts->total_run_time);
1559 json_object_add_value_float(root, "usr_cpu", usr_cpu);
1560 json_object_add_value_float(root, "sys_cpu", sys_cpu);
1561 json_object_add_value_int(root, "ctx", ts->ctx);
1562 json_object_add_value_int(root, "majf", ts->majf);
1563 json_object_add_value_int(root, "minf", ts->minf);
1565 /* Calc % distribution of IO depths */
1566 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
1567 tmp = json_create_object();
1568 json_object_add_value_object(root, "iodepth_level", tmp);
1569 /* Only show the 7 fixed I/O depth levels */
1570 for (i = 0; i < 7; i++) {
1573 snprintf(name, 20, "%d", 1 << i);
1575 snprintf(name, 20, ">=%d", 1 << i);
1576 json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
1579 /* Calc % distribution of submit IO depths */
1580 stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
1581 tmp = json_create_object();
1582 json_object_add_value_object(root, "iodepth_submit", tmp);
1583 /* Only show the 7 fixed I/O depth levels */
1584 for (i = 0; i < 7; i++) {
1587 snprintf(name, 20, "0");
1589 snprintf(name, 20, "%d", 1 << (i+1));
1591 snprintf(name, 20, ">=%d", 1 << i);
1592 json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
1595 /* Calc % distribution of completion IO depths */
1596 stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
1597 tmp = json_create_object();
1598 json_object_add_value_object(root, "iodepth_complete", tmp);
1599 /* Only show the 7 fixed I/O depth levels */
1600 for (i = 0; i < 7; i++) {
1603 snprintf(name, 20, "0");
1605 snprintf(name, 20, "%d", 1 << (i+1));
1607 snprintf(name, 20, ">=%d", 1 << i);
1608 json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
1611 /* Calc % distribution of nanosecond, microsecond and millisecond latency */
1612 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
1613 stat_calc_lat_n(ts, io_u_lat_n);
1614 stat_calc_lat_u(ts, io_u_lat_u);
1615 stat_calc_lat_m(ts, io_u_lat_m);
1617 /* Nanosecond latency */
1618 tmp = json_create_object();
1619 json_object_add_value_object(root, "latency_ns", tmp);
1620 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++) {
1621 const char *ranges[] = { "2", "4", "10", "20", "50", "100",
1622 "250", "500", "750", "1000", };
1623 json_object_add_value_float(tmp, ranges[i], io_u_lat_n[i]);
1625 /* Microsecond latency */
1626 tmp = json_create_object();
1627 json_object_add_value_object(root, "latency_us", tmp);
1628 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
1629 const char *ranges[] = { "2", "4", "10", "20", "50", "100",
1630 "250", "500", "750", "1000", };
1631 json_object_add_value_float(tmp, ranges[i], io_u_lat_u[i]);
1633 /* Millisecond latency */
1634 tmp = json_create_object();
1635 json_object_add_value_object(root, "latency_ms", tmp);
1636 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++) {
1637 const char *ranges[] = { "2", "4", "10", "20", "50", "100",
1638 "250", "500", "750", "1000", "2000",
1640 json_object_add_value_float(tmp, ranges[i], io_u_lat_m[i]);
1643 /* Additional output if continue_on_error is set - default off */
1644 if (ts->continue_on_error) {
1645 json_object_add_value_int(root, "total_err", ts->total_err_count);
1646 json_object_add_value_int(root, "first_error", ts->first_error);
1649 if (ts->latency_depth) {
1650 json_object_add_value_int(root, "latency_depth", ts->latency_depth);
1651 json_object_add_value_int(root, "latency_target", ts->latency_target);
1652 json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
1653 json_object_add_value_int(root, "latency_window", ts->latency_window);
1656 /* Additional output if description is set */
1657 if (strlen(ts->description))
1658 json_object_add_value_string(root, "desc", ts->description);
1660 if (ts->nr_block_infos) {
1661 /* Block error histogram and types */
1663 unsigned int *percentiles = NULL;
1664 unsigned int block_state_counts[BLOCK_STATE_COUNT];
1666 len = calc_block_percentiles(ts->nr_block_infos, ts->block_infos,
1667 ts->percentile_list,
1668 &percentiles, block_state_counts);
1671 struct json_object *block, *percentile_object, *states;
1673 block = json_create_object();
1674 json_object_add_value_object(root, "block", block);
1676 percentile_object = json_create_object();
1677 json_object_add_value_object(block, "percentiles",
1679 for (i = 0; i < len; i++) {
1681 snprintf(buf, sizeof(buf), "%f",
1682 ts->percentile_list[i].u.f);
1683 json_object_add_value_int(percentile_object,
1688 states = json_create_object();
1689 json_object_add_value_object(block, "states", states);
1690 for (state = 0; state < BLOCK_STATE_COUNT; state++) {
1691 json_object_add_value_int(states,
1692 block_state_names[state],
1693 block_state_counts[state]);
1700 struct json_object *data;
1701 struct json_array *iops, *bw;
1705 snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
1706 ts->ss_state & FIO_SS_IOPS ? "iops" : "bw",
1707 ts->ss_state & FIO_SS_SLOPE ? "_slope" : "",
1708 (float) ts->ss_limit.u.f,
1709 ts->ss_state & FIO_SS_PCT ? "%" : "");
1711 tmp = json_create_object();
1712 json_object_add_value_object(root, "steadystate", tmp);
1713 json_object_add_value_string(tmp, "ss", ss_buf);
1714 json_object_add_value_int(tmp, "duration", (int)ts->ss_dur);
1715 json_object_add_value_int(tmp, "attained", (ts->ss_state & FIO_SS_ATTAINED) > 0);
1717 snprintf(ss_buf, sizeof(ss_buf), "%f%s", (float) ts->ss_criterion.u.f,
1718 ts->ss_state & FIO_SS_PCT ? "%" : "");
1719 json_object_add_value_string(tmp, "criterion", ss_buf);
1720 json_object_add_value_float(tmp, "max_deviation", ts->ss_deviation.u.f);
1721 json_object_add_value_float(tmp, "slope", ts->ss_slope.u.f);
1723 data = json_create_object();
1724 json_object_add_value_object(tmp, "data", data);
1725 bw = json_create_array();
1726 iops = json_create_array();
1729 ** if ss was attained or the buffer is not full,
1730 ** ss->head points to the first element in the list.
1731 ** otherwise it actually points to the second element
1734 if ((ts->ss_state & FIO_SS_ATTAINED) || !(ts->ss_state & FIO_SS_BUFFER_FULL))
1737 j = ts->ss_head == 0 ? ts->ss_dur - 1 : ts->ss_head - 1;
1738 for (l = 0; l < ts->ss_dur; l++) {
1739 k = (j + l) % ts->ss_dur;
1740 json_array_add_value_int(bw, ts->ss_bw_data[k]);
1741 json_array_add_value_int(iops, ts->ss_iops_data[k]);
1743 json_object_add_value_int(data, "bw_mean", steadystate_bw_mean(ts));
1744 json_object_add_value_int(data, "iops_mean", steadystate_iops_mean(ts));
1745 json_object_add_value_array(data, "iops", iops);
1746 json_object_add_value_array(data, "bw", bw);
1752 static void show_thread_status_terse(struct thread_stat *ts,
1753 struct group_run_stats *rs,
1754 struct buf_output *out)
1756 if (terse_version >= 2 && terse_version <= 5)
1757 show_thread_status_terse_all(ts, rs, terse_version, out);
1759 log_err("fio: bad terse version!? %d\n", terse_version);
1762 struct json_object *show_thread_status(struct thread_stat *ts,
1763 struct group_run_stats *rs,
1764 struct flist_head *opt_list,
1765 struct buf_output *out)
1767 struct json_object *ret = NULL;
1769 if (output_format & FIO_OUTPUT_TERSE)
1770 show_thread_status_terse(ts, rs, out);
1771 if (output_format & FIO_OUTPUT_JSON)
1772 ret = show_thread_status_json(ts, rs, opt_list);
1773 if (output_format & FIO_OUTPUT_NORMAL)
1774 show_thread_status_normal(ts, rs, out);
1779 static void __sum_stat(struct io_stat *dst, struct io_stat *src, bool first)
1783 dst->min_val = min(dst->min_val, src->min_val);
1784 dst->max_val = max(dst->max_val, src->max_val);
1787 * Compute new mean and S after the merge
1788 * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
1789 * #Parallel_algorithm>
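 *
 * i.e. mean' = (n_dst * mean_dst + n_src * mean_src) / (n_dst + n_src)
 *      S'    = S_dst + S_src + delta^2 * n_dst * n_src / (n_dst + n_src),
 *      where delta = mean_src - mean_dst and n is the sample count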
1792 mean = src->mean.u.f;
1795 double delta = src->mean.u.f - dst->mean.u.f;
1797 mean = ((src->mean.u.f * src->samples) +
1798 (dst->mean.u.f * dst->samples)) /
1799 (dst->samples + src->samples);
1801 S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
1802 (dst->samples * src->samples) /
1803 (dst->samples + src->samples);
1806 dst->samples += src->samples;
1807 dst->mean.u.f = mean;
1813 * We sum two kinds of stats - one that is time based, in which case we
1814 * apply the proper summing technique, and then one that is iops/bw
1815 * numbers. For group_reporting, we should just add those up, not make
1816 * them the mean of everything.
1818 static void sum_stat(struct io_stat *dst, struct io_stat *src, bool first,
1821 if (src->samples == 0)
1825 __sum_stat(dst, src, first);
1830 dst->min_val = src->min_val;
1831 dst->max_val = src->max_val;
1832 dst->samples = src->samples;
1833 dst->mean.u.f = src->mean.u.f;
1834 dst->S.u.f = src->S.u.f;
1836 dst->min_val += src->min_val;
1837 dst->max_val += src->max_val;
1838 dst->samples += src->samples;
1839 dst->mean.u.f += src->mean.u.f;
1840 dst->S.u.f += src->S.u.f;
1844 void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
1848 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1849 if (dst->max_run[i] < src->max_run[i])
1850 dst->max_run[i] = src->max_run[i];
1851 if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
1852 dst->min_run[i] = src->min_run[i];
1853 if (dst->max_bw[i] < src->max_bw[i])
1854 dst->max_bw[i] = src->max_bw[i];
1855 if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
1856 dst->min_bw[i] = src->min_bw[i];
1858 dst->iobytes[i] += src->iobytes[i];
1859 dst->agg[i] += src->agg[i];
1863 dst->kb_base = src->kb_base;
1864 if (!dst->unit_base)
1865 dst->unit_base = src->unit_base;
1867 dst->sig_figs = src->sig_figs;
1870 void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src,
1875 for (l = 0; l < DDIR_RWDIR_CNT; l++) {
1876 if (!dst->unified_rw_rep) {
1877 sum_stat(&dst->clat_stat[l], &src->clat_stat[l], first, false);
1878 sum_stat(&dst->clat_high_prio_stat[l], &src->clat_high_prio_stat[l], first, false);
1879 sum_stat(&dst->clat_prio_stat[l], &src->clat_prio_stat[l], first, false);
1880 sum_stat(&dst->slat_stat[l], &src->slat_stat[l], first, false);
1881 sum_stat(&dst->lat_stat[l], &src->lat_stat[l], first, false);
1882 sum_stat(&dst->bw_stat[l], &src->bw_stat[l], first, true);
1883 sum_stat(&dst->iops_stat[l], &src->iops_stat[l], first, true);
1885 dst->io_bytes[l] += src->io_bytes[l];
1887 if (dst->runtime[l] < src->runtime[l])
1888 dst->runtime[l] = src->runtime[l];
1890 sum_stat(&dst->clat_stat[0], &src->clat_stat[l], first, false);
1891 sum_stat(&dst->clat_high_prio_stat[l], &src->clat_high_prio_stat[l], first, false);
1892 sum_stat(&dst->clat_prio_stat[l], &src->clat_prio_stat[l], first, false);
1893 sum_stat(&dst->slat_stat[0], &src->slat_stat[l], first, false);
1894 sum_stat(&dst->lat_stat[0], &src->lat_stat[l], first, false);
1895 sum_stat(&dst->bw_stat[0], &src->bw_stat[l], first, true);
1896 sum_stat(&dst->iops_stat[0], &src->iops_stat[l], first, true);
1898 dst->io_bytes[0] += src->io_bytes[l];
1900 if (dst->runtime[0] < src->runtime[l])
1901 dst->runtime[0] = src->runtime[l];
1904 * We're summing to the same destination, so override
1905 * 'first' after the first iteration of the loop
1911 sum_stat(&dst->sync_stat, &src->sync_stat, first, false);
1912 dst->usr_time += src->usr_time;
1913 dst->sys_time += src->sys_time;
1914 dst->ctx += src->ctx;
1915 dst->majf += src->majf;
1916 dst->minf += src->minf;
1918 for (k = 0; k < FIO_IO_U_MAP_NR; k++) {
1919 dst->io_u_map[k] += src->io_u_map[k];
1920 dst->io_u_submit[k] += src->io_u_submit[k];
1921 dst->io_u_complete[k] += src->io_u_complete[k];
1924 for (k = 0; k < FIO_IO_U_LAT_N_NR; k++)
1925 dst->io_u_lat_n[k] += src->io_u_lat_n[k];
1926 for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
1927 dst->io_u_lat_u[k] += src->io_u_lat_u[k];
1928 for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
1929 dst->io_u_lat_m[k] += src->io_u_lat_m[k];
1931 for (k = 0; k < DDIR_RWDIR_CNT; k++) {
1932 if (!dst->unified_rw_rep) {
1933 dst->total_io_u[k] += src->total_io_u[k];
1934 dst->short_io_u[k] += src->short_io_u[k];
1935 dst->drop_io_u[k] += src->drop_io_u[k];
1937 dst->total_io_u[0] += src->total_io_u[k];
1938 dst->short_io_u[0] += src->short_io_u[k];
1939 dst->drop_io_u[0] += src->drop_io_u[k];
1943 dst->total_io_u[DDIR_SYNC] += src->total_io_u[DDIR_SYNC];
1945 for (k = 0; k < FIO_LAT_CNT; k++)
1946 for (l = 0; l < DDIR_RWDIR_CNT; l++)
1947 for (m = 0; m < FIO_IO_U_PLAT_NR; m++)
1948 if (!dst->unified_rw_rep)
1949 dst->io_u_plat[k][l][m] += src->io_u_plat[k][l][m];
1951 dst->io_u_plat[k][0][m] += src->io_u_plat[k][l][m];
1953 for (k = 0; k < FIO_IO_U_PLAT_NR; k++)
1954 dst->io_u_sync_plat[k] += src->io_u_sync_plat[k];
1956 for (k = 0; k < DDIR_RWDIR_CNT; k++) {
1957 for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
1958 if (!dst->unified_rw_rep) {
1959 dst->io_u_plat_high_prio[k][m] += src->io_u_plat_high_prio[k][m];
1960 dst->io_u_plat_prio[k][m] += src->io_u_plat_prio[k][m];
1962 dst->io_u_plat_high_prio[0][m] += src->io_u_plat_high_prio[k][m];
1963 dst->io_u_plat_prio[0][m] += src->io_u_plat_prio[k][m];
1969 dst->total_run_time += src->total_run_time;
1970 dst->total_submit += src->total_submit;
1971 dst->total_complete += src->total_complete;
1972 dst->nr_zone_resets += src->nr_zone_resets;
1973 dst->cachehit += src->cachehit;
1974 dst->cachemiss += src->cachemiss;
1977 void init_group_run_stat(struct group_run_stats *gs)
1980 memset(gs, 0, sizeof(*gs));
1982 for (i = 0; i < DDIR_RWDIR_CNT; i++)
1983 gs->min_bw[i] = gs->min_run[i] = ~0UL;
1986 void init_thread_stat(struct thread_stat *ts)
1990 memset(ts, 0, sizeof(*ts));
1992 for (j = 0; j < DDIR_RWDIR_CNT; j++) {
1993 ts->lat_stat[j].min_val = -1UL;
1994 ts->clat_stat[j].min_val = -1UL;
1995 ts->slat_stat[j].min_val = -1UL;
1996 ts->bw_stat[j].min_val = -1UL;
1997 ts->iops_stat[j].min_val = -1UL;
1998 ts->clat_high_prio_stat[j].min_val = -1UL;
1999 ts->clat_prio_stat[j].min_val = -1UL;
2001 ts->sync_stat.min_val = -1UL;
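/* min_val starts at all ones (ULONG_MAX) so the first recorded sample always becomes the minimum */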
2005 void __show_run_stats(void)
2007 struct group_run_stats *runstats, *rs;
2008 struct thread_data *td;
2009 struct thread_stat *threadstats, *ts;
2010 int i, j, k, nr_ts, last_ts, idx;
2011 bool kb_base_warned = false;
2012 bool unit_base_warned = false;
2013 struct json_object *root = NULL;
2014 struct json_array *array = NULL;
2015 struct buf_output output[FIO_OUTPUT_NR];
2016 struct flist_head **opt_lists;
2018 runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));
2020 for (i = 0; i < groupid + 1; i++)
2021 init_group_run_stat(&runstats[i]);
2024 * find out how many thread stats we need. if group reporting isn't
2025 * enabled, it's one-per-td.
2029 for_each_td(td, i) {
2030 if (!td->o.group_reporting) {
2034 if (last_ts == td->groupid)
2039 last_ts = td->groupid;
2043 threadstats = malloc(nr_ts * sizeof(struct thread_stat));
2044 opt_lists = malloc(nr_ts * sizeof(struct flist_head *));
2046 for (i = 0; i < nr_ts; i++) {
2047 init_thread_stat(&threadstats[i]);
2048 opt_lists[i] = NULL;
2054 for_each_td(td, i) {
2057 if (idx && (!td->o.group_reporting ||
2058 (td->o.group_reporting && last_ts != td->groupid))) {
2063 last_ts = td->groupid;
2065 ts = &threadstats[j];
2067 ts->clat_percentiles = td->o.clat_percentiles;
2068 ts->lat_percentiles = td->o.lat_percentiles;
2069 ts->slat_percentiles = td->o.slat_percentiles;
2070 ts->percentile_precision = td->o.percentile_precision;
2071 memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));
2072 opt_lists[j] = &td->opt_list;
2077 if (ts->groupid == -1) {
2079 * These are per-group shared already
2081 snprintf(ts->name, sizeof(ts->name), "%s", td->o.name);
2082 if (td->o.description)
2083 snprintf(ts->description,
2084 sizeof(ts->description), "%s",
2087 memset(ts->description, 0, FIO_JOBDESC_SIZE);
2090 * If multiple entries in this group, this is
2093 ts->thread_number = td->thread_number;
2094 ts->groupid = td->groupid;
2097 * first pid in group, not very useful...
2101 ts->kb_base = td->o.kb_base;
2102 ts->unit_base = td->o.unit_base;
2103 ts->sig_figs = td->o.sig_figs;
2104 ts->unified_rw_rep = td->o.unified_rw_rep;
2105 } else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
2106 log_info("fio: kb_base differs for jobs in group, using"
2107 " %u as the base\n", ts->kb_base);
2108 kb_base_warned = true;
2109 } else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
2110 log_info("fio: unit_base differs for jobs in group, using"
2111 " %u as the base\n", ts->unit_base);
2112 unit_base_warned = true;
2115 ts->continue_on_error = td->o.continue_on_error;
2116 ts->total_err_count += td->total_err_count;
2117 ts->first_error = td->first_error;
2119 if (!td->error && td->o.continue_on_error &&
2121 ts->error = td->first_error;
2122 snprintf(ts->verror, sizeof(ts->verror), "%s",
2124 } else if (td->error) {
2125 ts->error = td->error;
2126 snprintf(ts->verror, sizeof(ts->verror), "%s",
2131 ts->latency_depth = td->latency_qd;
2132 ts->latency_target = td->o.latency_target;
2133 ts->latency_percentile = td->o.latency_percentile;
2134 ts->latency_window = td->o.latency_window;
2136 ts->nr_block_infos = td->ts.nr_block_infos;
2137 for (k = 0; k < ts->nr_block_infos; k++)
2138 ts->block_infos[k] = td->ts.block_infos[k];
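/*
 * Note (not in the original source): idx counts how many jobs have been
 * folded into this thread_stat so far, so idx == 1 marks the first member
 * of a reporting group; sum_thread_stats() can then seed the aggregate
 * from this job's values instead of merging into already-summed ones.
 */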
2140 sum_thread_stats(ts, &td->ts, idx == 1);
2143 ts->ss_state = td->ss.state;
2144 ts->ss_dur = td->ss.dur;
2145 ts->ss_head = td->ss.head;
2146 ts->ss_bw_data = td->ss.bw_data;
2147 ts->ss_iops_data = td->ss.iops_data;
2148 ts->ss_limit.u.f = td->ss.limit;
2149 ts->ss_slope.u.f = td->ss.slope;
2150 ts->ss_deviation.u.f = td->ss.deviation;
2151 ts->ss_criterion.u.f = td->ss.criterion;
2154 ts->ss_dur = ts->ss_state = 0;
2157 for (i = 0; i < nr_ts; i++) {
2158 unsigned long long bw;
2160 ts = &threadstats[i];
2161 if (ts->groupid == -1)
2163 rs = &runstats[ts->groupid];
2164 rs->kb_base = ts->kb_base;
2165 rs->unit_base = ts->unit_base;
2166 rs->sig_figs = ts->sig_figs;
2167 rs->unified_rw_rep += ts->unified_rw_rep;
2169 for (j = 0; j < DDIR_RWDIR_CNT; j++) {
2170 if (!ts->runtime[j])
2172 if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
2173 rs->min_run[j] = ts->runtime[j];
2174 if (ts->runtime[j] > rs->max_run[j])
2175 rs->max_run[j] = ts->runtime[j];
2179 bw = ts->io_bytes[j] * 1000 / ts->runtime[j];
2180 if (bw < rs->min_bw[j])
2182 if (bw > rs->max_bw[j])
2185 rs->iobytes[j] += ts->io_bytes[j];
2189 for (i = 0; i < groupid + 1; i++) {
2194 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
2195 if (rs->max_run[ddir])
2196 rs->agg[ddir] = (rs->iobytes[ddir] * 1000) /
2201 for (i = 0; i < FIO_OUTPUT_NR; i++)
2202 buf_output_init(&output[i]);
2205 * don't overwrite last signal output
2207 if (output_format & FIO_OUTPUT_NORMAL)
2208 log_buf(&output[__FIO_OUTPUT_NORMAL], "\n");
2209 if (output_format & FIO_OUTPUT_JSON) {
2210 struct thread_data *global;
2213 unsigned long long ms_since_epoch;
2216 gettimeofday(&now, NULL);
2217 ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
2218 (unsigned long long)(now.tv_usec) / 1000;
2220 tv_sec = now.tv_sec;
2221 os_ctime_r(&tv_sec, time_buf, sizeof(time_buf));
2222 if (time_buf[strlen(time_buf) - 1] == '\n')
2223 time_buf[strlen(time_buf) - 1] = '\0';
2225 root = json_create_object();
2226 json_object_add_value_string(root, "fio version", fio_version_string);
2227 json_object_add_value_int(root, "timestamp", now.tv_sec);
2228 json_object_add_value_int(root, "timestamp_ms", ms_since_epoch);
2229 json_object_add_value_string(root, "time", time_buf);
2230 global = get_global_options();
2231 json_add_job_opts(root, "global options", &global->opt_list);
2232 array = json_create_array();
2233 json_object_add_value_array(root, "jobs", array);
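/*
 * Example (values are hypothetical, not from any real run) of the JSON
 * skeleton assembled above before the per-job objects are appended to
 * the "jobs" array:
 *
 *   {
 *     "fio version" : "fio-3.x",
 *     "timestamp" : 1700000000,
 *     "timestamp_ms" : 1700000000123,
 *     "time" : "Tue Nov 14 22:13:20 2023",
 *     "global options" : { ... },
 *     "jobs" : [ ... ]
 *   }
 */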
2237 fio_server_send_job_options(&get_global_options()->opt_list, -1U);
2239 for (i = 0; i < nr_ts; i++) {
2240 ts = &threadstats[i];
2241 rs = &runstats[ts->groupid];
2244 fio_server_send_job_options(opt_lists[i], i);
2245 fio_server_send_ts(ts, rs);
2247 if (output_format & FIO_OUTPUT_TERSE)
2248 show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]);
2249 if (output_format & FIO_OUTPUT_JSON) {
2250 struct json_object *tmp = show_thread_status_json(ts, rs, opt_lists[i]);
2251 json_array_add_value_object(array, tmp);
2253 if (output_format & FIO_OUTPUT_NORMAL)
2254 show_thread_status_normal(ts, rs, &output[__FIO_OUTPUT_NORMAL]);
2257 if (!is_backend && (output_format & FIO_OUTPUT_JSON)) {
2258 /* disk util stats, if any */
2259 show_disk_util(1, root, &output[__FIO_OUTPUT_JSON]);
2261 show_idle_prof_stats(FIO_OUTPUT_JSON, root, &output[__FIO_OUTPUT_JSON]);
2263 json_print_object(root, &output[__FIO_OUTPUT_JSON]);
2264 log_buf(&output[__FIO_OUTPUT_JSON], "\n");
2265 json_free_object(root);
2268 for (i = 0; i < groupid + 1; i++) {
2273 fio_server_send_gs(rs);
2274 else if (output_format & FIO_OUTPUT_NORMAL)
2275 show_group_stats(rs, &output[__FIO_OUTPUT_NORMAL]);
2279 fio_server_send_du();
2280 else if (output_format & FIO_OUTPUT_NORMAL) {
2281 show_disk_util(0, NULL, &output[__FIO_OUTPUT_NORMAL]);
2282 show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, &output[__FIO_OUTPUT_NORMAL]);
2285 for (i = 0; i < FIO_OUTPUT_NR; i++) {
2286 struct buf_output *out = &output[i];
2288 log_info_buf(out->buf, out->buflen);
2289 buf_output_free(out);
2292 fio_idle_prof_cleanup();
2300 void __show_running_run_stats(void)
2302 struct thread_data *td;
2303 unsigned long long *rt;
2307 fio_sem_down(stat_sem);
2309 rt = malloc(thread_number * sizeof(unsigned long long));
2310 fio_gettime(&ts, NULL);
2312 for_each_td(td, i) {
2313 td->update_rusage = 1;
2314 td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
2315 td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
2316 td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
2317 td->ts.total_run_time = mtime_since(&td->epoch, &ts);
2319 rt[i] = mtime_since(&td->start, &ts);
2320 if (td_read(td) && td->ts.io_bytes[DDIR_READ])
2321 td->ts.runtime[DDIR_READ] += rt[i];
2322 if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
2323 td->ts.runtime[DDIR_WRITE] += rt[i];
2324 if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
2325 td->ts.runtime[DDIR_TRIM] += rt[i];
2328 for_each_td(td, i) {
2329 if (td->runstate >= TD_EXITED)
2331 if (td->rusage_sem) {
2332 td->update_rusage = 1;
2333 fio_sem_down(td->rusage_sem);
2335 td->update_rusage = 0;
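/*
 * Note (not in the original source): the rt[i] added to each runtime in
 * the first loop only exists so this interim snapshot reflects the time
 * spent in the still-running interval; it is subtracted again below,
 * leaving the persistent per-job accounting untouched for the final report.
 */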
2340 for_each_td(td, i) {
2341 if (td_read(td) && td->ts.io_bytes[DDIR_READ])
2342 td->ts.runtime[DDIR_READ] -= rt[i];
2343 if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
2344 td->ts.runtime[DDIR_WRITE] -= rt[i];
2345 if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
2346 td->ts.runtime[DDIR_TRIM] -= rt[i];
2350 fio_sem_up(stat_sem);
2353 static bool status_interval_init;
2354 static struct timespec status_time;
2355 static bool status_file_disabled;
2357 #define FIO_STATUS_FILE "fio-dump-status"
2359 static int check_status_file(void)
2362 const char *temp_dir;
2363 char fio_status_file_path[PATH_MAX];
2365 if (status_file_disabled)
2368 temp_dir = getenv("TMPDIR");
2369 if (temp_dir == NULL) {
2370 temp_dir = getenv("TEMP");
2371 if (temp_dir && strlen(temp_dir) >= PATH_MAX)
2374 if (temp_dir == NULL)
2377 __coverity_tainted_data_sanitize__(temp_dir);
2380 snprintf(fio_status_file_path, sizeof(fio_status_file_path), "%s/%s", temp_dir, FIO_STATUS_FILE);
2382 if (stat(fio_status_file_path, &sb))
2385 if (unlink(fio_status_file_path) < 0) {
2386 log_err("fio: failed to unlink %s: %s\n", fio_status_file_path,
2388 log_err("fio: disabling status file updates\n");
2389 status_file_disabled = true;
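/*
 * Usage note (not in the original source): creating the trigger file,
 * e.g. running "touch /tmp/fio-dump-status" from another shell (the
 * directory is taken from $TMPDIR or $TEMP when either is set), makes
 * the next check below emit the interim run stats. The file is unlinked
 * once it has been seen, and the mechanism disables itself if that
 * unlink fails, as handled above.
 */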
2395 void check_for_running_stats(void)
2397 if (status_interval) {
2398 if (!status_interval_init) {
2399 fio_gettime(&status_time, NULL);
2400 status_interval_init = true;
2401 } else if (mtime_since_now(&status_time) >= status_interval) {
2402 show_running_run_stats();
2403 fio_gettime(&status_time, NULL);
2407 if (check_status_file()) {
2408 show_running_run_stats();
2413 static inline void add_stat_sample(struct io_stat *is, unsigned long long data)
2418 if (data > is->max_val)
2420 if (data < is->min_val)
2423 delta = val - is->mean.u.f;
2425 is->mean.u.f += delta / (is->samples + 1.0);
2426 is->S.u.f += delta * (val - is->mean.u.f);
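/*
 * Aside (not in the original source): add_stat_sample() keeps a running
 * mean and a running sum of squared deviations S using the online
 * (Welford-style) recurrence visible above, so a standard deviation can
 * be derived later without storing every sample. The sketch below
 * restates the same recurrence on an invented standalone type; it is an
 * illustration only, not fio code, and the stddev formula is the usual
 * sample deviation rather than a claim about fio's exact reporting.
 */
#if 0	/* illustrative sketch, not compiled */
#include <math.h>

struct running_stat {
	double mean;		/* running mean of all samples */
	double S;		/* running sum of squared deviations */
	unsigned long samples;
};

static void running_stat_add(struct running_stat *rs, double val)
{
	/* same update order as the lines above: delta, mean, then S */
	double delta = val - rs->mean;

	rs->mean += delta / (rs->samples + 1.0);
	rs->S += delta * (val - rs->mean);
	rs->samples++;
}

static double running_stat_stddev(const struct running_stat *rs)
{
	if (rs->samples < 2)
		return 0.0;
	return sqrt(rs->S / (rs->samples - 1.0));
}
#endif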
2433 * Return a struct io_logs, which is added to the tail of the log
2436 static struct io_logs *get_new_log(struct io_log *iolog)
2438 size_t new_size, new_samples;
2439 struct io_logs *cur_log;
2442 * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling forever
2445 if (!iolog->cur_log_max)
2446 new_samples = DEF_LOG_ENTRIES;
2448 new_samples = iolog->cur_log_max * 2;
2449 if (new_samples > MAX_LOG_ENTRIES)
2450 new_samples = MAX_LOG_ENTRIES;
2453 new_size = new_samples * log_entry_sz(iolog);
2455 cur_log = smalloc(sizeof(*cur_log));
2457 INIT_FLIST_HEAD(&cur_log->list);
2458 cur_log->log = malloc(new_size);
2460 cur_log->nr_samples = 0;
2461 cur_log->max_samples = new_samples;
2462 flist_add_tail(&cur_log->list, &iolog->io_logs);
2463 iolog->cur_log_max = new_samples;
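/*
 * Aside (not in the original source): the sizing policy above grows log
 * memory geometrically. The first chunk holds DEF_LOG_ENTRIES samples and
 * each later chunk doubles the previous capacity until clamped at
 * MAX_LOG_ENTRIES (both constants come from fio's headers); new samples
 * land in a fresh chunk appended to iolog->io_logs rather than a
 * reallocation of one big array. A minimal standalone restatement of
 * that policy, for illustration only:
 */
#if 0	/* illustrative sketch, not compiled */
#include <stddef.h>

static size_t next_chunk_samples(size_t cur_log_max, size_t def_entries,
				 size_t max_entries)
{
	size_t new_samples;

	if (!cur_log_max)
		new_samples = def_entries;
	else {
		new_samples = cur_log_max * 2;
		if (new_samples > max_entries)
			new_samples = max_entries;
	}
	return new_samples;
}
#endif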
2473 * Add and return a new log chunk, or return current log if big enough
2475 static struct io_logs *regrow_log(struct io_log *iolog)
2477 struct io_logs *cur_log;
2480 if (!iolog || iolog->disabled)
2483 cur_log = iolog_cur_log(iolog);
2485 cur_log = get_new_log(iolog);
2490 if (cur_log->nr_samples < cur_log->max_samples)
2494 * No room for a new sample. If we're compressing on the fly, flush
2495 * out the current chunk
2497 if (iolog->log_gz) {
2498 if (iolog_cur_flush(iolog, cur_log)) {
2499 log_err("fio: failed flushing iolog! Will stop logging.\n");
2505 * Get a new log array, and add to our list
2507 cur_log = get_new_log(iolog);
2509 log_err("fio: failed extending iolog! Will stop logging.\n");
2513 if (!iolog->pending || !iolog->pending->nr_samples)
2517 * Flush pending items to new log
2519 for (i = 0; i < iolog->pending->nr_samples; i++) {
2520 struct io_sample *src, *dst;
2522 src = get_sample(iolog, iolog->pending, i);
2523 dst = get_sample(iolog, cur_log, i);
2524 memcpy(dst, src, log_entry_sz(iolog));
2526 cur_log->nr_samples = iolog->pending->nr_samples;
2528 iolog->pending->nr_samples = 0;
2532 iolog->disabled = true;
2536 void regrow_logs(struct thread_data *td)
2538 regrow_log(td->slat_log);
2539 regrow_log(td->clat_log);
2540 regrow_log(td->clat_hist_log);
2541 regrow_log(td->lat_log);
2542 regrow_log(td->bw_log);
2543 regrow_log(td->iops_log);
2544 td->flags &= ~TD_F_REGROW_LOGS;
2547 static struct io_logs *get_cur_log(struct io_log *iolog)
2549 struct io_logs *cur_log;
2551 cur_log = iolog_cur_log(iolog);
2553 cur_log = get_new_log(iolog);
2558 if (cur_log->nr_samples < cur_log->max_samples)
2562 * Out of space. If we're in IO offload mode, or we're not doing
2563 * per unit logging (hence logging happens outside of the IO thread
2564 * as well), add a new log chunk inline. If we're doing inline
2565 * submissions, flag 'td' as needing a log regrow and we'll take
2566 * care of it on the submission side.
2568 if ((iolog->td && iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD) ||
2569 !per_unit_log(iolog))
2570 return regrow_log(iolog);
2573 iolog->td->flags |= TD_F_REGROW_LOGS;
2575 assert(iolog->pending->nr_samples < iolog->pending->max_samples);
2576 return iolog->pending;
2579 static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
2580 enum fio_ddir ddir, unsigned long long bs,
2581 unsigned long t, uint64_t offset, uint8_t priority_bit)
2583 struct io_logs *cur_log;
2585 if (iolog->disabled)
2587 if (flist_empty(&iolog->io_logs))
2588 iolog->avg_last[ddir] = t;
2590 cur_log = get_cur_log(iolog);
2592 struct io_sample *s;
2594 s = get_sample(iolog, cur_log, cur_log->nr_samples);
2597 s->time = t + (iolog->td ? iolog->td->unix_epoch : 0);
2598 io_sample_set_ddir(iolog, s, ddir);
2600 s->priority_bit = priority_bit;
2602 if (iolog->log_offset) {
2603 struct io_sample_offset *so = (void *) s;
2605 so->offset = offset;
2608 cur_log->nr_samples++;
2612 iolog->disabled = true;
2615 static inline void reset_io_stat(struct io_stat *ios)
2617 ios->min_val = -1ULL;
2618 ios->max_val = ios->samples = 0;
2619 ios->mean.u.f = ios->S.u.f = 0;
2622 void reset_io_stats(struct thread_data *td)
2624 struct thread_stat *ts = &td->ts;
2627 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
2628 reset_io_stat(&ts->clat_high_prio_stat[i]);
2629 reset_io_stat(&ts->clat_prio_stat[i]);
2630 reset_io_stat(&ts->clat_stat[i]);
2631 reset_io_stat(&ts->slat_stat[i]);
2632 reset_io_stat(&ts->lat_stat[i]);
2633 reset_io_stat(&ts->bw_stat[i]);
2634 reset_io_stat(&ts->iops_stat[i]);
2636 ts->io_bytes[i] = 0;
2638 ts->total_io_u[i] = 0;
2639 ts->short_io_u[i] = 0;
2640 ts->drop_io_u[i] = 0;
2642 for (j = 0; j < FIO_IO_U_PLAT_NR; j++) {
2643 ts->io_u_plat_high_prio[i][j] = 0;
2644 ts->io_u_plat_prio[i][j] = 0;
2646 ts->io_u_sync_plat[j] = 0;
2650 for (i = 0; i < FIO_LAT_CNT; i++)
2651 for (j = 0; j < DDIR_RWDIR_CNT; j++)
2652 for (k = 0; k < FIO_IO_U_PLAT_NR; k++)
2653 ts->io_u_plat[i][j][k] = 0;
2655 ts->total_io_u[DDIR_SYNC] = 0;
2657 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
2658 ts->io_u_map[i] = 0;
2659 ts->io_u_submit[i] = 0;
2660 ts->io_u_complete[i] = 0;
2663 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
2664 ts->io_u_lat_n[i] = 0;
2665 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
2666 ts->io_u_lat_u[i] = 0;
2667 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
2668 ts->io_u_lat_m[i] = 0;
2670 ts->total_submit = 0;
2671 ts->total_complete = 0;
2672 ts->nr_zone_resets = 0;
2673 ts->cachehit = ts->cachemiss = 0;
2676 static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
2677 unsigned long elapsed, bool log_max, uint8_t priority_bit)
2680 * Note an entry in the log. Use the mean from the logged samples,
2681 * making sure to properly round up. Only write a log entry if we
2682 * had actual samples done.
2684 if (iolog->avg_window[ddir].samples) {
2685 union io_sample_data data;
2688 data.val = iolog->avg_window[ddir].max_val;
2690 data.val = iolog->avg_window[ddir].mean.u.f + 0.50;
2692 __add_log_sample(iolog, data, ddir, 0, elapsed, 0, priority_bit);
2695 reset_io_stat(&iolog->avg_window[ddir]);
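/*
 * Worked example (hypothetical numbers, not from the source): if the
 * window for a data direction accumulated samples with mean 812.6, the
 * "+ 0.50" above stores 813 in the integer sample value, i.e. the mean is
 * rounded to nearest rather than truncated; with log_max set the window's
 * max_val is logged instead. Either way the window statistics are reset
 * afterwards so the next interval starts clean.
 */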
2698 static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
2699 bool log_max, uint8_t priority_bit)
2703 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
2704 __add_stat_to_log(iolog, ddir, elapsed, log_max, priority_bit);
2707 static unsigned long add_log_sample(struct thread_data *td,
2708 struct io_log *iolog,
2709 union io_sample_data data,
2710 enum fio_ddir ddir, unsigned long long bs,
2711 uint64_t offset, uint8_t priority_bit)
2713 unsigned long elapsed, this_window;
2718 elapsed = mtime_since_now(&td->epoch);
2721 * If no time averaging, just add the log sample.
2723 if (!iolog->avg_msec) {
2724 __add_log_sample(iolog, data, ddir, bs, elapsed, offset, priority_bit);
2729 * Add the sample. If the time period has passed, then
2730 * add that entry to the log and clear.
2732 add_stat_sample(&iolog->avg_window[ddir], data.val);
2735 * If period hasn't passed, adding the above sample is all we need to do.
2738 this_window = elapsed - iolog->avg_last[ddir];
2739 if (elapsed < iolog->avg_last[ddir])
2740 return iolog->avg_last[ddir] - elapsed;
2741 else if (this_window < iolog->avg_msec) {
2742 unsigned long diff = iolog->avg_msec - this_window;
2744 if (inline_log(iolog) || diff > LOG_MSEC_SLACK)
2748 _add_stat_to_log(iolog, elapsed, td->o.log_max != 0, priority_bit);
2750 iolog->avg_last[ddir] = elapsed - (this_window - iolog->avg_msec);
2751 return iolog->avg_msec;
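/*
 * Worked example (hypothetical numbers, not from the source), assuming
 * avg_msec = 500 and the LOG_MSEC_SLACK of 1 defined above: a sample at
 * elapsed = 1340 with avg_last = 1000 gives this_window = 340, so the
 * sample is only folded into the window and the remaining 160 msec are
 * returned as the time until the next flush. A later sample at
 * elapsed = 1510 gives this_window = 510 >= avg_msec, so the window mean
 * (or max, with log_max) is written out and avg_last becomes
 * 1510 - (510 - 500) = 1500, keeping windows aligned on 500 msec
 * boundaries despite the 10 msec of drift.
 */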
2754 void finalize_logs(struct thread_data *td, bool unit_logs)
2756 unsigned long elapsed;
2758 elapsed = mtime_since_now(&td->epoch);
2760 if (td->clat_log && unit_logs)
2761 _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0, 0);
2762 if (td->slat_log && unit_logs)
2763 _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0, 0);
2764 if (td->lat_log && unit_logs)
2765 _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0, 0);
2766 if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
2767 _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0, 0);
2768 if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
2769 _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0, 0);
2772 void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned long long bs,
2773 uint8_t priority_bit)
2775 struct io_log *iolog;
2780 iolog = agg_io_log[ddir];
2781 __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0, priority_bit);
2784 void add_sync_clat_sample(struct thread_stat *ts, unsigned long long nsec)
2786 unsigned int idx = plat_val_to_idx(nsec);
2787 assert(idx < FIO_IO_U_PLAT_NR);
2789 ts->io_u_sync_plat[idx]++;
2790 add_stat_sample(&ts->sync_stat, nsec);
2793 static void add_lat_percentile_sample_noprio(struct thread_stat *ts,
2794 unsigned long long nsec, enum fio_ddir ddir, enum fio_lat lat)
2796 unsigned int idx = plat_val_to_idx(nsec);
2797 assert(idx < FIO_IO_U_PLAT_NR);
2799 ts->io_u_plat[lat][ddir][idx]++;
2802 static void add_lat_percentile_sample(struct thread_stat *ts,
2803 unsigned long long nsec, enum fio_ddir ddir, uint8_t priority_bit,
2806 unsigned int idx = plat_val_to_idx(nsec);
2808 add_lat_percentile_sample_noprio(ts, nsec, ddir, lat);
2811 ts->io_u_plat_prio[ddir][idx]++;
2813 ts->io_u_plat_high_prio[ddir][idx]++;
2816 void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
2817 unsigned long long nsec, unsigned long long bs,
2818 uint64_t offset, uint8_t priority_bit)
2820 const bool needs_lock = td_async_processing(td);
2821 unsigned long elapsed, this_window;
2822 struct thread_stat *ts = &td->ts;
2823 struct io_log *iolog = td->clat_hist_log;
2828 add_stat_sample(&ts->clat_stat[ddir], nsec);
2830 if (!ts->lat_percentiles) {
2832 add_stat_sample(&ts->clat_high_prio_stat[ddir], nsec);
2834 add_stat_sample(&ts->clat_prio_stat[ddir], nsec);
2838 add_log_sample(td, td->clat_log, sample_val(nsec), ddir, bs,
2839 offset, priority_bit);
2841 if (ts->clat_percentiles) {
2842 if (ts->lat_percentiles)
2843 add_lat_percentile_sample_noprio(ts, nsec, ddir, FIO_CLAT);
2845 add_lat_percentile_sample(ts, nsec, ddir, priority_bit, FIO_CLAT);
2848 if (iolog && iolog->hist_msec) {
2849 struct io_hist *hw = &iolog->hist_window[ddir];
2852 elapsed = mtime_since_now(&td->epoch);
2853 if (!hw->hist_last)
2854 hw->hist_last = elapsed;
2855 this_window = elapsed - hw->hist_last;
2857 if (this_window >= iolog->hist_msec) {
2858 uint64_t *io_u_plat;
2859 struct io_u_plat_entry *dst;
2862 * Make a byte-for-byte copy of the latency histogram
2863 * stored in td->ts.io_u_plat[ddir], recording it in a
2864 * log sample. Note that the matching call to free() is
2865 * located in iolog.c after printing this sample to the log file.
2868 io_u_plat = (uint64_t *) td->ts.io_u_plat[FIO_CLAT][ddir];
2869 dst = malloc(sizeof(struct io_u_plat_entry));
2870 memcpy(&(dst->io_u_plat), io_u_plat,
2871 FIO_IO_U_PLAT_NR * sizeof(uint64_t));
2872 flist_add(&dst->list, &hw->list);
2873 __add_log_sample(iolog, sample_plat(dst), ddir, bs,
2874 elapsed, offset, priority_bit);
2877 * Update the last time we recorded as being now, minus
2878 * any drift in time we encountered before actually
2879 * making the record.
2881 hw->hist_last = elapsed - (this_window - iolog->hist_msec);
2887 __td_io_u_unlock(td);
2890 void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
2891 unsigned long long nsec, unsigned long long bs, uint64_t offset,
2892 uint8_t priority_bit)
2894 const bool needs_lock = td_async_processing(td);
2895 struct thread_stat *ts = &td->ts;
2903 add_stat_sample(&ts->slat_stat[ddir], nsec);
2906 add_log_sample(td, td->slat_log, sample_val(nsec), ddir, bs, offset,
2909 if (ts->slat_percentiles)
2910 add_lat_percentile_sample_noprio(ts, nsec, ddir, FIO_SLAT);
2913 __td_io_u_unlock(td);
2916 void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
2917 unsigned long long nsec, unsigned long long bs,
2918 uint64_t offset, uint8_t priority_bit)
2920 const bool needs_lock = td_async_processing(td);
2921 struct thread_stat *ts = &td->ts;
2929 add_stat_sample(&ts->lat_stat[ddir], nsec);
2932 add_log_sample(td, td->lat_log, sample_val(nsec), ddir, bs,
2933 offset, priority_bit);
2935 if (ts->lat_percentiles) {
2936 add_lat_percentile_sample(ts, nsec, ddir, priority_bit, FIO_LAT);
2938 add_stat_sample(&ts->clat_high_prio_stat[ddir], nsec);
2940 add_stat_sample(&ts->clat_prio_stat[ddir], nsec);
2944 __td_io_u_unlock(td);
2947 void add_bw_sample(struct thread_data *td, struct io_u *io_u,
2948 unsigned int bytes, unsigned long long spent)
2950 const bool needs_lock = td_async_processing(td);
2951 struct thread_stat *ts = &td->ts;
2955 rate = (unsigned long) (bytes * 1000000ULL / spent);
2962 add_stat_sample(&ts->bw_stat[io_u->ddir], rate);
2965 add_log_sample(td, td->bw_log, sample_val(rate), io_u->ddir,
2966 bytes, io_u->offset, io_u_is_prio(io_u));
2968 td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir];
2971 __td_io_u_unlock(td);
2974 static int __add_samples(struct thread_data *td, struct timespec *parent_tv,
2975 struct timespec *t, unsigned int avg_time,
2976 uint64_t *this_io_bytes, uint64_t *stat_io_bytes,
2977 struct io_stat *stat, struct io_log *log,
2980 const bool needs_lock = td_async_processing(td);
2981 unsigned long spent, rate;
2983 unsigned long next, next_log;
2985 next_log = avg_time;
2987 spent = mtime_since(parent_tv, t);
2988 if (spent < avg_time && avg_time - spent >= LOG_MSEC_SLACK)
2989 return avg_time - spent;
2995 * Compute both read and write rates for the interval.
2997 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
3000 delta = this_io_bytes[ddir] - stat_io_bytes[ddir];
3002 continue; /* No entries for interval */
3006 rate = delta * 1000 / spent / 1024; /* KiB/s */
3008 rate = (delta * 1000) / spent;
3012 add_stat_sample(&stat[ddir], rate);
3015 unsigned long long bs = 0;
3017 if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
3018 bs = td->o.min_bs[ddir];
3020 next = add_log_sample(td, log, sample_val(rate), ddir, bs, 0, 0);
3021 next_log = min(next_log, next);
3024 stat_io_bytes[ddir] = this_io_bytes[ddir];
3030 __td_io_u_unlock(td);
3032 if (spent <= avg_time)
3035 next = avg_time - (1 + spent - avg_time);
3037 return min(next, next_log);
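/*
 * Worked example (hypothetical numbers, not from the source): with an
 * averaging window of 1000 msec, spent = 1000 and a delta of 10485760
 * bytes for a direction, the byte-based path above gives
 * 10485760 * 1000 / 1000 / 1024 = 10240 KiB/s for the bandwidth log; the
 * other path, used when the caller passes block counts instead of bytes
 * (see add_iops_samples() below), skips the /1024 and the same arithmetic
 * yields IOPS.
 */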
3040 static int add_bw_samples(struct thread_data *td, struct timespec *t)
3042 return __add_samples(td, &td->bw_sample_time, t, td->o.bw_avg_time,
3043 td->this_io_bytes, td->stat_io_bytes,
3044 td->ts.bw_stat, td->bw_log, true);
3047 void add_iops_sample(struct thread_data *td, struct io_u *io_u,
3050 const bool needs_lock = td_async_processing(td);
3051 struct thread_stat *ts = &td->ts;
3056 add_stat_sample(&ts->iops_stat[io_u->ddir], 1);
3059 add_log_sample(td, td->iops_log, sample_val(1), io_u->ddir,
3060 bytes, io_u->offset, io_u_is_prio(io_u));
3062 td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];
3065 __td_io_u_unlock(td);
3068 static int add_iops_samples(struct thread_data *td, struct timespec *t)
3070 return __add_samples(td, &td->iops_sample_time, t, td->o.iops_avg_time,
3071 td->this_io_blocks, td->stat_io_blocks,
3072 td->ts.iops_stat, td->iops_log, false);
3076 * Returns msecs to next event
3078 int calc_log_samples(void)
3080 struct thread_data *td;
3081 unsigned int next = ~0U, tmp;
3082 struct timespec now;
3085 fio_gettime(&now, NULL);
3087 for_each_td(td, i) {
3090 if (in_ramp_time(td) ||
3091 !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
3092 next = min(td->o.iops_avg_time, td->o.bw_avg_time);
3096 (td->bw_log && !per_unit_log(td->bw_log))) {
3097 tmp = add_bw_samples(td, &now);
3101 if (!td->iops_log ||
3102 (td->iops_log && !per_unit_log(td->iops_log))) {
3103 tmp = add_iops_samples(td, &now);
3109 return next == ~0U ? 0 : next;
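/*
 * Note (not in the original source): a return of 0 means no timed logging
 * is pending (next stayed at ~0U), while any other value is the number of
 * milliseconds until the earliest per-job bw/iops averaging window needs
 * service, so the caller knows how long it may sleep. Jobs still in ramp
 * time or not running simply set next to
 * min(td->o.iops_avg_time, td->o.bw_avg_time).
 */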
3112 void stat_init(void)
3114 stat_sem = fio_sem_init(FIO_SEM_UNLOCKED);
3117 void stat_exit(void)
3120 * When we have the semaphore, we know out-of-band access to it has ended.
3123 fio_sem_down(stat_sem);
3124 fio_sem_remove(stat_sem);
3128 * Called from signal handler. Wake up status thread.
3130 void show_running_run_stats(void)
3135 uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u)
3137 /* Ignore io_u's which span multiple blocks--they will just get
3138 * inaccurate counts. */
3139 int idx = (io_u->offset - io_u->file->file_offset)
3140 / td->o.bs[DDIR_TRIM];
3141 uint32_t *info = &td->ts.block_infos[idx];
3142 assert(idx < td->ts.nr_block_infos);