12 #include "lib/ieee754.h"
14 #include "lib/getrusage.h"
17 #include "lib/output_buffer.h"
18 #include "helper_thread.h"
21 #define LOG_MSEC_SLACK 1
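/*
 * LOG_MSEC_SLACK is the tolerance used by add_log_sample() further down:
 * when an averaging window falls short of avg_msec by no more than this
 * many msec (and the log is not one filled inline from the I/O path), the
 * window is flushed now rather than deferred to the next call.
 */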
23 struct fio_sem *stat_sem;
25 void clear_rusage_stat(struct thread_data *td)
27 struct thread_stat *ts = &td->ts;
29 fio_getrusage(&td->ru_start);
30 ts->usr_time = ts->sys_time = 0;
32 ts->minf = ts->majf = 0;
35 void update_rusage_stat(struct thread_data *td)
37 struct thread_stat *ts = &td->ts;
39 fio_getrusage(&td->ru_end);
40 ts->usr_time += mtime_since_tv(&td->ru_start.ru_utime,
41 &td->ru_end.ru_utime);
42 ts->sys_time += mtime_since_tv(&td->ru_start.ru_stime,
43 &td->ru_end.ru_stime);
44 ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
45 - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
46 ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
47 ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;
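/*
 * Copy the end snapshot into ru_start so the next call only accounts for
 * usage accrued since this one; the ts-> counters above therefore stay
 * cumulative without double counting.
 */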
49 memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
53 * Given a latency, return the index of the corresponding bucket in
54 * the structure tracking percentiles.
56 * (1) find the group (and error bits) that the value (latency)
57 * belongs to by looking at its MSB. (2) find the bucket number in the
58 * group by looking at the index bits.
61 static unsigned int plat_val_to_idx(unsigned long long val)
63 unsigned int msb, error_bits, base, offset, idx;
65 /* Find MSB starting from bit 0 */
69 msb = (sizeof(val)*8) - __builtin_clzll(val) - 1;
72 * MSB <= FIO_IO_U_PLAT_BITS means the value is below FIO_IO_U_PLAT_VAL << 1
73 * and cannot be rounded off; use the value itself as the index
75 if (msb <= FIO_IO_U_PLAT_BITS)
78 /* Compute the number of error bits to discard */
79 error_bits = msb - FIO_IO_U_PLAT_BITS;
81 /* Compute the number of buckets before the group */
82 base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;
85 * Discard the error bits and apply the mask to find the
86 * index for the buckets in the group
88 offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);
90 /* Make sure the index does not exceed (array size - 1) */
91 idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
92 (base + offset) : (FIO_IO_U_PLAT_NR - 1);
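/*
 * Worked example, assuming the usual FIO_IO_U_PLAT_BITS = 6 (so
 * FIO_IO_U_PLAT_VAL = 64): val = 1000 has msb = 9, giving error_bits = 3,
 * base = (3 + 1) << 6 = 256 and offset = 63 & (1000 >> 3) = 61, so the
 * sample lands in bucket idx = 317. Every bucket in that group covers
 * 1 << error_bits = 8 consecutive values.
 */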
98 * Convert the given index of the bucket array to the value
99 * represented by the bucket
101 static unsigned long long plat_idx_to_val(unsigned int idx)
103 unsigned int error_bits;
104 unsigned long long k, base;
106 assert(idx < FIO_IO_U_PLAT_NR);
108 /* Indexes below (FIO_IO_U_PLAT_VAL << 1) store the value exactly
109 * (MSB <= FIO_IO_U_PLAT_BITS); the index is the value itself */
110 if (idx < (FIO_IO_U_PLAT_VAL << 1))
113 /* Find the group and compute the minimum value of that group */
114 error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
115 base = ((unsigned long long) 1) << (error_bits + FIO_IO_U_PLAT_BITS);
117 /* Find the bucket number within the group */
118 k = idx % FIO_IO_U_PLAT_VAL;
120 /* Return the mean of the range of the bucket */
121 return base + ((k + 0.5) * (1 << error_bits));
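/*
 * Continuing the example above (FIO_IO_U_PLAT_BITS = 6): plat_idx_to_val(317)
 * recovers error_bits = (317 >> 6) - 1 = 3, base = 1 << 9 = 512 and
 * k = 317 % 64 = 61, returning 512 + (61 + 0.5) * 8 = 1004, the midpoint of
 * the [1000, 1008) range that bucket 317 covers.
 */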
124 static int double_cmp(const void *a, const void *b)
126 const fio_fp64_t fa = *(const fio_fp64_t *) a;
127 const fio_fp64_t fb = *(const fio_fp64_t *) b;
132 else if (fa.u.f < fb.u.f)
138 unsigned int calc_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr,
139 fio_fp64_t *plist, unsigned long long **output,
140 unsigned long long *maxv, unsigned long long *minv)
142 unsigned long long sum = 0;
143 unsigned int len, i, j = 0;
144 unsigned int oval_len = 0;
145 unsigned long long *ovals = NULL;
152 while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
159 * Sort the percentile list. Note that it may already be sorted if
160 * we are using the default values, but since it's a short list this
161 * isn't a worry. Also note that this does not work for NaN values.
164 qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
167 * Calculate bucket values, note down max and min values
170 for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
172 while (sum >= (plist[j].u.f / 100.0 * nr)) {
173 assert(plist[j].u.f <= 100.0);
177 ovals = realloc(ovals, oval_len * sizeof(*ovals));
180 ovals[j] = plat_idx_to_val(i);
181 if (ovals[j] < *minv)
183 if (ovals[j] > *maxv)
186 is_last = (j == len - 1) != 0;
199 * Find and display the p-th percentile of clat
201 static void show_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr,
202 fio_fp64_t *plist, unsigned int precision,
203 const char *pre, struct buf_output *out)
205 unsigned int divisor, len, i, j = 0;
206 unsigned long long minv, maxv;
207 unsigned long long *ovals;
208 int per_line, scale_down, time_width;
212 len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
217 * We default to nsecs, but if the value range is such that we
218 * should scale down to usecs or msecs, do that.
220 if (minv > 2000000 && maxv > 99999999ULL) {
223 log_buf(out, " %s percentiles (msec):\n |", pre);
224 } else if (minv > 2000 && maxv > 99999) {
227 log_buf(out, " %s percentiles (usec):\n |", pre);
231 log_buf(out, " %s percentiles (nsec):\n |", pre);
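/*
 * The thresholds above are in nsec: switch to msec only when even the
 * smallest sample exceeds 2 msec and the largest roughly 100 msec, to usec
 * under the analogous 2 usec / ~100 usec thresholds, and stay in nsec
 * otherwise. The chosen scale_down is applied below by repeatedly dividing
 * by 1000, rounding up.
 */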
235 time_width = max(5, (int) (log10(maxv / divisor) + 1));
236 snprintf(fmt, sizeof(fmt), " %%%u.%ufth=[%%%dllu]%%c", precision + 3,
237 precision, time_width);
238 /* fmt will be something like " %5.2fth=[%4llu]%c" */
239 per_line = (80 - 7) / (precision + 10 + time_width);
241 for (j = 0; j < len; j++) {
243 if (j != 0 && (j % per_line) == 0)
246 /* end of the list */
247 is_last = (j == len - 1) != 0;
249 for (i = 0; i < scale_down; i++)
250 ovals[j] = (ovals[j] + 999) / 1000;
252 log_buf(out, fmt, plist[j].u.f, ovals[j], is_last ? '\n' : ',');
257 if ((j % per_line) == per_line - 1) /* for formatting */
266 bool calc_lat(struct io_stat *is, unsigned long long *min,
267 unsigned long long *max, double *mean, double *dev)
269 double n = (double) is->samples;
276 *mean = is->mean.u.f;
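/*
 * S holds the running sum of squared deviations maintained by
 * add_stat_sample() further down, so S / (n - 1) is the Bessel-corrected
 * sample variance and its square root the standard deviation reported here.
 */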
279 *dev = sqrt(is->S.u.f / (n - 1.0));
286 void show_group_stats(struct group_run_stats *rs, struct buf_output *out)
288 char *io, *agg, *min, *max;
289 char *ioalt, *aggalt, *minalt, *maxalt;
290 const char *str[] = { " READ", " WRITE" , " TRIM"};
293 log_buf(out, "\nRun status group %d (all jobs):\n", rs->groupid);
295 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
296 const int i2p = is_power_of_2(rs->kb_base);
301 io = num2str(rs->iobytes[i], rs->sig_figs, 1, i2p, N2S_BYTE);
302 ioalt = num2str(rs->iobytes[i], rs->sig_figs, 1, !i2p, N2S_BYTE);
303 agg = num2str(rs->agg[i], rs->sig_figs, 1, i2p, rs->unit_base);
304 aggalt = num2str(rs->agg[i], rs->sig_figs, 1, !i2p, rs->unit_base);
305 min = num2str(rs->min_bw[i], rs->sig_figs, 1, i2p, rs->unit_base);
306 minalt = num2str(rs->min_bw[i], rs->sig_figs, 1, !i2p, rs->unit_base);
307 max = num2str(rs->max_bw[i], rs->sig_figs, 1, i2p, rs->unit_base);
308 maxalt = num2str(rs->max_bw[i], rs->sig_figs, 1, !i2p, rs->unit_base);
309 log_buf(out, "%s: bw=%s (%s), %s-%s (%s-%s), io=%s (%s), run=%llu-%llumsec\n",
310 rs->unified_rw_rep ? " MIXED" : str[i],
311 agg, aggalt, min, max, minalt, maxalt, io, ioalt,
312 (unsigned long long) rs->min_run[i],
313 (unsigned long long) rs->max_run[i]);
326 void stat_calc_dist(uint64_t *map, unsigned long total, double *io_u_dist)
331 * Do depth distribution calculations
333 for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
335 io_u_dist[i] = (double) map[i] / (double) total;
336 io_u_dist[i] *= 100.0;
337 if (io_u_dist[i] < 0.1 && map[i])
344 static void stat_calc_lat(struct thread_stat *ts, double *dst,
345 uint64_t *src, int nr)
347 unsigned long total = ddir_rw_sum(ts->total_io_u);
351 * Do latency distribution calculations
353 for (i = 0; i < nr; i++) {
355 dst[i] = (double) src[i] / (double) total;
357 if (dst[i] < 0.01 && src[i])
365 * To keep the terse format unaltered, add all of the ns latency
366 * buckets to the first us latency bucket
368 void stat_calc_lat_nu(struct thread_stat *ts, double *io_u_lat_u)
370 unsigned long ntotal = 0, total = ddir_rw_sum(ts->total_io_u);
373 stat_calc_lat(ts, io_u_lat_u, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
375 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
376 ntotal += ts->io_u_lat_n[i];
378 io_u_lat_u[0] += 100.0 * (double) ntotal / (double) total;
381 void stat_calc_lat_n(struct thread_stat *ts, double *io_u_lat)
383 stat_calc_lat(ts, io_u_lat, ts->io_u_lat_n, FIO_IO_U_LAT_N_NR);
386 void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
388 stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
391 void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
393 stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
396 static void display_lat(const char *name, unsigned long long min,
397 unsigned long long max, double mean, double dev,
398 struct buf_output *out)
400 const char *base = "(nsec)";
403 if (nsec_to_msec(&min, &max, &mean, &dev))
405 else if (nsec_to_usec(&min, &max, &mean, &dev))
408 minp = num2str(min, 6, 1, 0, N2S_NONE);
409 maxp = num2str(max, 6, 1, 0, N2S_NONE);
411 log_buf(out, " %s %s: min=%s, max=%s, avg=%5.02f,"
412 " stdev=%5.02f\n", name, base, minp, maxp, mean, dev);
418 static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
419 int ddir, struct buf_output *out)
421 const char *str[] = { " read", "write", " trim", "sync" };
423 unsigned long long min, max, bw, iops;
425 char *io_p, *bw_p, *bw_p_alt, *iops_p;
428 if (ddir_sync(ddir)) {
429 if (calc_lat(&ts->sync_stat, &min, &max, &mean, &dev)) {
430 log_buf(out, " %s:\n", "fsync/fdatasync/sync_file_range");
431 display_lat(str[ddir], min, max, mean, dev, out);
432 show_clat_percentiles(ts->io_u_sync_plat,
433 ts->sync_stat.samples,
435 ts->percentile_precision,
441 assert(ddir_rw(ddir));
443 if (!ts->runtime[ddir])
446 i2p = is_power_of_2(rs->kb_base);
447 runt = ts->runtime[ddir];
449 bw = (1000 * ts->io_bytes[ddir]) / runt;
450 io_p = num2str(ts->io_bytes[ddir], ts->sig_figs, 1, i2p, N2S_BYTE);
451 bw_p = num2str(bw, ts->sig_figs, 1, i2p, ts->unit_base);
452 bw_p_alt = num2str(bw, ts->sig_figs, 1, !i2p, ts->unit_base);
454 iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
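/*
 * runtime[] is tracked in msec, so the factor of 1000 above turns the
 * totals into bytes/sec for bw and IOs/sec for iops before formatting.
 */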
455 iops_p = num2str(iops, ts->sig_figs, 1, 0, N2S_NONE);
457 log_buf(out, " %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)\n",
458 rs->unified_rw_rep ? "mixed" : str[ddir],
459 iops_p, bw_p, bw_p_alt, io_p,
460 (unsigned long long) ts->runtime[ddir]);
467 if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
468 display_lat("slat", min, max, mean, dev, out);
469 if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
470 display_lat("clat", min, max, mean, dev, out);
471 if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
472 display_lat(" lat", min, max, mean, dev, out);
474 if (ts->clat_percentiles || ts->lat_percentiles) {
475 const char *name = ts->clat_percentiles ? "clat" : " lat";
478 if (ts->clat_percentiles)
479 samples = ts->clat_stat[ddir].samples;
481 samples = ts->lat_stat[ddir].samples;
483 show_clat_percentiles(ts->io_u_plat[ddir],
486 ts->percentile_precision, name, out);
488 if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
489 double p_of_agg = 100.0, fkb_base = (double)rs->kb_base;
492 if ((rs->unit_base == 1) && i2p)
494 else if (rs->unit_base == 1)
502 p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
503 if (p_of_agg > 100.0)
507 if (rs->unit_base == 1) {
514 if (mean > fkb_base * fkb_base) {
519 bw_str = (rs->unit_base == 1 ? "Mibit" : "MiB");
522 log_buf(out, " bw (%5s/s): min=%5llu, max=%5llu, per=%3.2f%%, "
523 "avg=%5.02f, stdev=%5.02f, samples=%" PRIu64 "\n",
524 bw_str, min, max, p_of_agg, mean, dev,
525 (&ts->bw_stat[ddir])->samples);
527 if (calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev)) {
528 log_buf(out, " iops : min=%5llu, max=%5llu, "
529 "avg=%5.02f, stdev=%5.02f, samples=%" PRIu64 "\n",
530 min, max, mean, dev, (&ts->iops_stat[ddir])->samples);
534 static bool show_lat(double *io_u_lat, int nr, const char **ranges,
535 const char *msg, struct buf_output *out)
537 bool new_line = true, shown = false;
540 for (i = 0; i < nr; i++) {
541 if (io_u_lat[i] <= 0.0)
547 log_buf(out, " lat (%s) : ", msg);
553 log_buf(out, "%s%3.2f%%", ranges[i], io_u_lat[i]);
565 static void show_lat_n(double *io_u_lat_n, struct buf_output *out)
567 const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
568 "250=", "500=", "750=", "1000=", };
570 show_lat(io_u_lat_n, FIO_IO_U_LAT_N_NR, ranges, "nsec", out);
573 static void show_lat_u(double *io_u_lat_u, struct buf_output *out)
575 const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
576 "250=", "500=", "750=", "1000=", };
578 show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec", out);
581 static void show_lat_m(double *io_u_lat_m, struct buf_output *out)
583 const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
584 "250=", "500=", "750=", "1000=", "2000=",
587 show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec", out);
590 static void show_latencies(struct thread_stat *ts, struct buf_output *out)
592 double io_u_lat_n[FIO_IO_U_LAT_N_NR];
593 double io_u_lat_u[FIO_IO_U_LAT_U_NR];
594 double io_u_lat_m[FIO_IO_U_LAT_M_NR];
596 stat_calc_lat_n(ts, io_u_lat_n);
597 stat_calc_lat_u(ts, io_u_lat_u);
598 stat_calc_lat_m(ts, io_u_lat_m);
600 show_lat_n(io_u_lat_n, out);
601 show_lat_u(io_u_lat_u, out);
602 show_lat_m(io_u_lat_m, out);
605 static int block_state_category(int block_state)
607 switch (block_state) {
608 case BLOCK_STATE_UNINIT:
610 case BLOCK_STATE_TRIMMED:
611 case BLOCK_STATE_WRITTEN:
613 case BLOCK_STATE_WRITE_FAILURE:
614 case BLOCK_STATE_TRIM_FAILURE:
617 /* Silence compile warning on some BSDs and have a return */
623 static int compare_block_infos(const void *bs1, const void *bs2)
625 uint32_t block1 = *(uint32_t *)bs1;
626 uint32_t block2 = *(uint32_t *)bs2;
627 int state1 = BLOCK_INFO_STATE(block1);
628 int state2 = BLOCK_INFO_STATE(block2);
629 int bscat1 = block_state_category(state1);
630 int bscat2 = block_state_category(state2);
631 int cycles1 = BLOCK_INFO_TRIMS(block1);
632 int cycles2 = BLOCK_INFO_TRIMS(block2);
639 if (cycles1 < cycles2)
641 if (cycles1 > cycles2)
649 assert(block1 == block2);
653 static int calc_block_percentiles(int nr_block_infos, uint32_t *block_infos,
654 fio_fp64_t *plist, unsigned int **percentiles,
660 qsort(block_infos, nr_block_infos, sizeof(uint32_t), compare_block_infos);
662 while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
669 * Sort the percentile list. Note that it may already be sorted if
670 * we are using the default values, but since it's a short list this
671 * isn't a worry. Also note that this does not work for NaN values.
674 qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
677 /* Start only after the uninit entries end */
679 nr_uninit < nr_block_infos
680 && BLOCK_INFO_STATE(block_infos[nr_uninit]) == BLOCK_STATE_UNINIT;
684 if (nr_uninit == nr_block_infos)
687 *percentiles = calloc(len, sizeof(**percentiles));
689 for (i = 0; i < len; i++) {
690 int idx = (plist[i].u.f * (nr_block_infos - nr_uninit) / 100)
692 (*percentiles)[i] = BLOCK_INFO_TRIMS(block_infos[idx]);
695 memset(types, 0, sizeof(*types) * BLOCK_STATE_COUNT);
696 for (i = 0; i < nr_block_infos; i++)
697 types[BLOCK_INFO_STATE(block_infos[i])]++;
702 static const char *block_state_names[] = {
703 [BLOCK_STATE_UNINIT] = "unwritten",
704 [BLOCK_STATE_TRIMMED] = "trimmed",
705 [BLOCK_STATE_WRITTEN] = "written",
706 [BLOCK_STATE_TRIM_FAILURE] = "trim failure",
707 [BLOCK_STATE_WRITE_FAILURE] = "write failure",
710 static void show_block_infos(int nr_block_infos, uint32_t *block_infos,
711 fio_fp64_t *plist, struct buf_output *out)
714 unsigned int *percentiles = NULL;
715 unsigned int block_state_counts[BLOCK_STATE_COUNT];
717 len = calc_block_percentiles(nr_block_infos, block_infos, plist,
718 &percentiles, block_state_counts);
720 log_buf(out, " block lifetime percentiles :\n |");
722 for (i = 0; i < len; i++) {
723 uint32_t block_info = percentiles[i];
724 #define LINE_LENGTH 75
725 char str[LINE_LENGTH];
726 int strln = snprintf(str, LINE_LENGTH, " %3.2fth=%u%c",
727 plist[i].u.f, block_info,
728 i == len - 1 ? '\n' : ',');
729 assert(strln < LINE_LENGTH);
730 if (pos + strln > LINE_LENGTH) {
732 log_buf(out, "\n |");
734 log_buf(out, "%s", str);
741 log_buf(out, " states :");
742 for (i = 0; i < BLOCK_STATE_COUNT; i++)
743 log_buf(out, " %s=%u%c",
744 block_state_names[i], block_state_counts[i],
745 i == BLOCK_STATE_COUNT - 1 ? '\n' : ',');
748 static void show_ss_normal(struct thread_stat *ts, struct buf_output *out)
750 char *p1, *p1alt, *p2;
751 unsigned long long bw_mean, iops_mean;
752 const int i2p = is_power_of_2(ts->kb_base);
757 bw_mean = steadystate_bw_mean(ts);
758 iops_mean = steadystate_iops_mean(ts);
760 p1 = num2str(bw_mean / ts->kb_base, ts->sig_figs, ts->kb_base, i2p, ts->unit_base);
761 p1alt = num2str(bw_mean / ts->kb_base, ts->sig_figs, ts->kb_base, !i2p, ts->unit_base);
762 p2 = num2str(iops_mean, ts->sig_figs, 1, 0, N2S_NONE);
764 log_buf(out, " steadystate : attained=%s, bw=%s (%s), iops=%s, %s%s=%.3f%s\n",
765 ts->ss_state & FIO_SS_ATTAINED ? "yes" : "no",
767 ts->ss_state & FIO_SS_IOPS ? "iops" : "bw",
768 ts->ss_state & FIO_SS_SLOPE ? " slope": " mean dev",
769 ts->ss_criterion.u.f,
770 ts->ss_state & FIO_SS_PCT ? "%" : "");
777 static void show_thread_status_normal(struct thread_stat *ts,
778 struct group_run_stats *rs,
779 struct buf_output *out)
781 double usr_cpu, sys_cpu;
782 unsigned long runtime;
783 double io_u_dist[FIO_IO_U_MAP_NR];
787 if (!ddir_rw_sum(ts->io_bytes) && !ddir_rw_sum(ts->total_io_u))
790 memset(time_buf, 0, sizeof(time_buf));
793 os_ctime_r((const time_t *) &time_p, time_buf, sizeof(time_buf));
796 log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d: pid=%d: %s",
797 ts->name, ts->groupid, ts->members,
798 ts->error, (int) ts->pid, time_buf);
800 log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d: %s",
801 ts->name, ts->groupid, ts->members,
802 ts->error, ts->verror, (int) ts->pid,
806 if (strlen(ts->description))
807 log_buf(out, " Description : [%s]\n", ts->description);
809 if (ts->io_bytes[DDIR_READ])
810 show_ddir_status(rs, ts, DDIR_READ, out);
811 if (ts->io_bytes[DDIR_WRITE])
812 show_ddir_status(rs, ts, DDIR_WRITE, out);
813 if (ts->io_bytes[DDIR_TRIM])
814 show_ddir_status(rs, ts, DDIR_TRIM, out);
816 show_latencies(ts, out);
818 if (ts->sync_stat.samples)
819 show_ddir_status(rs, ts, DDIR_SYNC, out);
821 runtime = ts->total_run_time;
823 double runt = (double) runtime;
825 usr_cpu = (double) ts->usr_time * 100 / runt;
826 sys_cpu = (double) ts->sys_time * 100 / runt;
832 log_buf(out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%llu,"
833 " majf=%llu, minf=%llu\n", usr_cpu, sys_cpu,
834 (unsigned long long) ts->ctx,
835 (unsigned long long) ts->majf,
836 (unsigned long long) ts->minf);
838 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
839 log_buf(out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
840 " 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
841 io_u_dist[1], io_u_dist[2],
842 io_u_dist[3], io_u_dist[4],
843 io_u_dist[5], io_u_dist[6]);
845 stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
846 log_buf(out, " submit : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
847 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
848 io_u_dist[1], io_u_dist[2],
849 io_u_dist[3], io_u_dist[4],
850 io_u_dist[5], io_u_dist[6]);
851 stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
852 log_buf(out, " complete : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
853 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
854 io_u_dist[1], io_u_dist[2],
855 io_u_dist[3], io_u_dist[4],
856 io_u_dist[5], io_u_dist[6]);
857 log_buf(out, " issued rwts: total=%llu,%llu,%llu,%llu"
858 " short=%llu,%llu,%llu,0"
859 " dropped=%llu,%llu,%llu,0\n",
860 (unsigned long long) ts->total_io_u[0],
861 (unsigned long long) ts->total_io_u[1],
862 (unsigned long long) ts->total_io_u[2],
863 (unsigned long long) ts->total_io_u[3],
864 (unsigned long long) ts->short_io_u[0],
865 (unsigned long long) ts->short_io_u[1],
866 (unsigned long long) ts->short_io_u[2],
867 (unsigned long long) ts->drop_io_u[0],
868 (unsigned long long) ts->drop_io_u[1],
869 (unsigned long long) ts->drop_io_u[2]);
870 if (ts->continue_on_error) {
871 log_buf(out, " errors : total=%llu, first_error=%d/<%s>\n",
872 (unsigned long long)ts->total_err_count,
874 strerror(ts->first_error));
876 if (ts->latency_depth) {
877 log_buf(out, " latency : target=%llu, window=%llu, percentile=%.2f%%, depth=%u\n",
878 (unsigned long long)ts->latency_target,
879 (unsigned long long)ts->latency_window,
880 ts->latency_percentile.u.f,
884 if (ts->nr_block_infos)
885 show_block_infos(ts->nr_block_infos, ts->block_infos,
886 ts->percentile_list, out);
889 show_ss_normal(ts, out);
892 static void show_ddir_status_terse(struct thread_stat *ts,
893 struct group_run_stats *rs, int ddir,
894 int ver, struct buf_output *out)
896 unsigned long long min, max, minv, maxv, bw, iops;
897 unsigned long long *ovals = NULL;
902 assert(ddir_rw(ddir));
905 if (ts->runtime[ddir]) {
906 uint64_t runt = ts->runtime[ddir];
908 bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; /* KiB/s */
909 iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
912 log_buf(out, ";%llu;%llu;%llu;%llu",
913 (unsigned long long) ts->io_bytes[ddir] >> 10, bw, iops,
914 (unsigned long long) ts->runtime[ddir]);
916 if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
917 log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
919 log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
921 if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
922 log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
924 log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
926 if (ts->clat_percentiles || ts->lat_percentiles) {
927 len = calc_clat_percentiles(ts->io_u_plat[ddir],
928 ts->clat_stat[ddir].samples,
929 ts->percentile_list, &ovals, &maxv,
934 for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
936 log_buf(out, ";0%%=0");
939 log_buf(out, ";%f%%=%llu", ts->percentile_list[i].u.f, ovals[i]/1000);
942 if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
943 log_buf(out, ";%llu;%llu;%f;%f", min/1000, max/1000, mean/1000, dev/1000);
945 log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
950 bw_stat = calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev);
952 double p_of_agg = 100.0;
955 p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
956 if (p_of_agg > 100.0)
960 log_buf(out, ";%llu;%llu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
962 log_buf(out, ";%llu;%llu;%f%%;%f;%f", 0ULL, 0ULL, 0.0, 0.0, 0.0);
966 log_buf(out, ";%" PRIu64, (&ts->bw_stat[ddir])->samples);
968 log_buf(out, ";%lu", 0UL);
970 if (calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev))
971 log_buf(out, ";%llu;%llu;%f;%f;%" PRIu64, min, max,
972 mean, dev, (&ts->iops_stat[ddir])->samples);
974 log_buf(out, ";%llu;%llu;%f;%f;%lu", 0ULL, 0ULL, 0.0, 0.0, 0UL);
978 static void add_ddir_status_json(struct thread_stat *ts,
979 struct group_run_stats *rs, int ddir, struct json_object *parent)
981 unsigned long long min, max, minv, maxv;
982 unsigned long long bw_bytes, bw;
983 unsigned long long *ovals = NULL;
984 double mean, dev, iops;
987 const char *ddirname[] = { "read", "write", "trim", "sync" };
988 struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object = NULL;
990 double p_of_agg = 100.0;
992 assert(ddir_rw(ddir) || ddir_sync(ddir));
994 if (ts->unified_rw_rep && ddir != DDIR_READ)
997 dir_object = json_create_object();
998 json_object_add_value_object(parent,
999 ts->unified_rw_rep ? "mixed" : ddirname[ddir], dir_object);
1001 if (ddir_rw(ddir)) {
1005 if (ts->runtime[ddir]) {
1006 uint64_t runt = ts->runtime[ddir];
1008 bw_bytes = ((1000 * ts->io_bytes[ddir]) / runt); /* Bytes/s */
1009 bw = bw_bytes / 1024; /* KiB/s */
1010 iops = (1000.0 * (uint64_t) ts->total_io_u[ddir]) / runt;
1013 json_object_add_value_int(dir_object, "io_bytes", ts->io_bytes[ddir]);
1014 json_object_add_value_int(dir_object, "io_kbytes", ts->io_bytes[ddir] >> 10);
1015 json_object_add_value_int(dir_object, "bw_bytes", bw_bytes);
1016 json_object_add_value_int(dir_object, "bw", bw);
1017 json_object_add_value_float(dir_object, "iops", iops);
1018 json_object_add_value_int(dir_object, "runtime", ts->runtime[ddir]);
1019 json_object_add_value_int(dir_object, "total_ios", ts->total_io_u[ddir]);
1020 json_object_add_value_int(dir_object, "short_ios", ts->short_io_u[ddir]);
1021 json_object_add_value_int(dir_object, "drop_ios", ts->drop_io_u[ddir]);
1023 if (!calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
1027 tmp_object = json_create_object();
1028 json_object_add_value_object(dir_object, "slat_ns", tmp_object);
1029 json_object_add_value_int(tmp_object, "min", min);
1030 json_object_add_value_int(tmp_object, "max", max);
1031 json_object_add_value_float(tmp_object, "mean", mean);
1032 json_object_add_value_float(tmp_object, "stddev", dev);
1034 if (!calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
1038 tmp_object = json_create_object();
1039 json_object_add_value_object(dir_object, "clat_ns", tmp_object);
1040 json_object_add_value_int(tmp_object, "min", min);
1041 json_object_add_value_int(tmp_object, "max", max);
1042 json_object_add_value_float(tmp_object, "mean", mean);
1043 json_object_add_value_float(tmp_object, "stddev", dev);
1045 if (!calc_lat(&ts->sync_stat, &min, &max, &mean, &dev)) {
1050 tmp_object = json_create_object();
1051 json_object_add_value_object(dir_object, "lat_ns", tmp_object);
1052 json_object_add_value_int(dir_object, "total_ios", ts->total_io_u[DDIR_SYNC]);
1053 json_object_add_value_int(tmp_object, "min", min);
1054 json_object_add_value_int(tmp_object, "max", max);
1055 json_object_add_value_float(tmp_object, "mean", mean);
1056 json_object_add_value_float(tmp_object, "stddev", dev);
1059 if (ts->clat_percentiles || ts->lat_percentiles) {
1060 if (ddir_rw(ddir)) {
1061 len = calc_clat_percentiles(ts->io_u_plat[ddir],
1062 ts->clat_stat[ddir].samples,
1063 ts->percentile_list, &ovals, &maxv,
1066 len = calc_clat_percentiles(ts->io_u_sync_plat,
1067 ts->sync_stat.samples,
1068 ts->percentile_list, &ovals, &maxv,
1072 if (len > FIO_IO_U_LIST_MAX_LEN)
1073 len = FIO_IO_U_LIST_MAX_LEN;
1077 percentile_object = json_create_object();
1078 json_object_add_value_object(tmp_object, "percentile", percentile_object);
1079 for (i = 0; i < len; i++) {
1080 snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f);
1081 json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]);
1084 if (output_format & FIO_OUTPUT_JSON_PLUS) {
1085 clat_bins_object = json_create_object();
1086 if (ts->clat_percentiles)
1087 json_object_add_value_object(tmp_object, "bins", clat_bins_object);
1089 for(i = 0; i < FIO_IO_U_PLAT_NR; i++) {
1090 if (ddir_rw(ddir)) {
1091 if (ts->io_u_plat[ddir][i]) {
1092 snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
1093 json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_plat[ddir][i]);
1096 if (ts->io_u_sync_plat[i]) {
1097 snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
1098 json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_sync_plat[i]);
1107 if (!calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
1111 tmp_object = json_create_object();
1112 json_object_add_value_object(dir_object, "lat_ns", tmp_object);
1113 json_object_add_value_int(tmp_object, "min", min);
1114 json_object_add_value_int(tmp_object, "max", max);
1115 json_object_add_value_float(tmp_object, "mean", mean);
1116 json_object_add_value_float(tmp_object, "stddev", dev);
1117 if (output_format & FIO_OUTPUT_JSON_PLUS && ts->lat_percentiles)
1118 json_object_add_value_object(tmp_object, "bins", clat_bins_object);
1123 if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
1124 if (rs->agg[ddir]) {
1125 p_of_agg = mean * 100 / (double) (rs->agg[ddir] / 1024);
1126 if (p_of_agg > 100.0)
1131 p_of_agg = mean = dev = 0.0;
1133 json_object_add_value_int(dir_object, "bw_min", min);
1134 json_object_add_value_int(dir_object, "bw_max", max);
1135 json_object_add_value_float(dir_object, "bw_agg", p_of_agg);
1136 json_object_add_value_float(dir_object, "bw_mean", mean);
1137 json_object_add_value_float(dir_object, "bw_dev", dev);
1138 json_object_add_value_int(dir_object, "bw_samples",
1139 (&ts->bw_stat[ddir])->samples);
1141 if (!calc_lat(&ts->iops_stat[ddir], &min, &max, &mean, &dev)) {
1145 json_object_add_value_int(dir_object, "iops_min", min);
1146 json_object_add_value_int(dir_object, "iops_max", max);
1147 json_object_add_value_float(dir_object, "iops_mean", mean);
1148 json_object_add_value_float(dir_object, "iops_stddev", dev);
1149 json_object_add_value_int(dir_object, "iops_samples",
1150 (&ts->iops_stat[ddir])->samples);
1153 static void show_thread_status_terse_all(struct thread_stat *ts,
1154 struct group_run_stats *rs, int ver,
1155 struct buf_output *out)
1157 double io_u_dist[FIO_IO_U_MAP_NR];
1158 double io_u_lat_u[FIO_IO_U_LAT_U_NR];
1159 double io_u_lat_m[FIO_IO_U_LAT_M_NR];
1160 double usr_cpu, sys_cpu;
1165 log_buf(out, "2;%s;%d;%d", ts->name, ts->groupid, ts->error);
1167 log_buf(out, "%d;%s;%s;%d;%d", ver, fio_version_string,
1168 ts->name, ts->groupid, ts->error);
1170 /* Log Read Status */
1171 show_ddir_status_terse(ts, rs, DDIR_READ, ver, out);
1172 /* Log Write Status */
1173 show_ddir_status_terse(ts, rs, DDIR_WRITE, ver, out);
1174 /* Log Trim Status */
1175 if (ver == 2 || ver == 4 || ver == 5)
1176 show_ddir_status_terse(ts, rs, DDIR_TRIM, ver, out);
1179 if (ts->total_run_time) {
1180 double runt = (double) ts->total_run_time;
1182 usr_cpu = (double) ts->usr_time * 100 / runt;
1183 sys_cpu = (double) ts->sys_time * 100 / runt;
1189 log_buf(out, ";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
1190 (unsigned long long) ts->ctx,
1191 (unsigned long long) ts->majf,
1192 (unsigned long long) ts->minf);
1194 /* Calc % distribution of IO depths and of microsecond/millisecond latency */
1195 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
1196 stat_calc_lat_nu(ts, io_u_lat_u);
1197 stat_calc_lat_m(ts, io_u_lat_m);
1199 /* Only show the fixed 7 I/O depth levels */
1200 log_buf(out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
1201 io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
1202 io_u_dist[4], io_u_dist[5], io_u_dist[6]);
1204 /* Microsecond latency */
1205 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
1206 log_buf(out, ";%3.2f%%", io_u_lat_u[i]);
1207 /* Millisecond latency */
1208 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
1209 log_buf(out, ";%3.2f%%", io_u_lat_m[i]);
1211 /* disk util stats, if any */
1213 show_disk_util(1, NULL, out);
1215 /* Additional output if continue_on_error is set - default off */
1216 if (ts->continue_on_error)
1217 log_buf(out, ";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);
1221 /* Additional output if description is set */
1222 if (strlen(ts->description))
1223 log_buf(out, ";%s", ts->description);
1228 static void json_add_job_opts(struct json_object *root, const char *name,
1229 struct flist_head *opt_list)
1231 struct json_object *dir_object;
1232 struct flist_head *entry;
1233 struct print_option *p;
1235 if (flist_empty(opt_list))
1238 dir_object = json_create_object();
1239 json_object_add_value_object(root, name, dir_object);
1241 flist_for_each(entry, opt_list) {
1242 const char *pos = "";
1244 p = flist_entry(entry, struct print_option, list);
1247 json_object_add_value_string(dir_object, p->name, pos);
1251 static struct json_object *show_thread_status_json(struct thread_stat *ts,
1252 struct group_run_stats *rs,
1253 struct flist_head *opt_list)
1255 struct json_object *root, *tmp;
1256 struct jobs_eta *je;
1257 double io_u_dist[FIO_IO_U_MAP_NR];
1258 double io_u_lat_n[FIO_IO_U_LAT_N_NR];
1259 double io_u_lat_u[FIO_IO_U_LAT_U_NR];
1260 double io_u_lat_m[FIO_IO_U_LAT_M_NR];
1261 double usr_cpu, sys_cpu;
1265 root = json_create_object();
1266 json_object_add_value_string(root, "jobname", ts->name);
1267 json_object_add_value_int(root, "groupid", ts->groupid);
1268 json_object_add_value_int(root, "error", ts->error);
1271 je = get_jobs_eta(true, &size);
1273 json_object_add_value_int(root, "eta", je->eta_sec);
1274 json_object_add_value_int(root, "elapsed", je->elapsed_sec);
1278 json_add_job_opts(root, "job options", opt_list);
1280 add_ddir_status_json(ts, rs, DDIR_READ, root);
1281 add_ddir_status_json(ts, rs, DDIR_WRITE, root);
1282 add_ddir_status_json(ts, rs, DDIR_TRIM, root);
1283 add_ddir_status_json(ts, rs, DDIR_SYNC, root);
1286 if (ts->total_run_time) {
1287 double runt = (double) ts->total_run_time;
1289 usr_cpu = (double) ts->usr_time * 100 / runt;
1290 sys_cpu = (double) ts->sys_time * 100 / runt;
1295 json_object_add_value_float(root, "usr_cpu", usr_cpu);
1296 json_object_add_value_float(root, "sys_cpu", sys_cpu);
1297 json_object_add_value_int(root, "ctx", ts->ctx);
1298 json_object_add_value_int(root, "majf", ts->majf);
1299 json_object_add_value_int(root, "minf", ts->minf);
1302 /* Calc % distribution of IO depths and of microsecond/millisecond latency */
1303 stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
1304 stat_calc_lat_n(ts, io_u_lat_n);
1305 stat_calc_lat_u(ts, io_u_lat_u);
1306 stat_calc_lat_m(ts, io_u_lat_m);
1308 tmp = json_create_object();
1309 json_object_add_value_object(root, "iodepth_level", tmp);
1310 /* Only show the fixed 7 I/O depth levels */
1311 for (i = 0; i < 7; i++) {
1314 snprintf(name, 20, "%d", 1 << i);
1316 snprintf(name, 20, ">=%d", 1 << i);
1317 json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
1320 /* Nanosecond latency */
1321 tmp = json_create_object();
1322 json_object_add_value_object(root, "latency_ns", tmp);
1323 for (i = 0; i < FIO_IO_U_LAT_N_NR; i++) {
1324 const char *ranges[] = { "2", "4", "10", "20", "50", "100",
1325 "250", "500", "750", "1000", };
1326 json_object_add_value_float(tmp, ranges[i], io_u_lat_n[i]);
1328 /* Microsecond latency */
1329 tmp = json_create_object();
1330 json_object_add_value_object(root, "latency_us", tmp);
1331 for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
1332 const char *ranges[] = { "2", "4", "10", "20", "50", "100",
1333 "250", "500", "750", "1000", };
1334 json_object_add_value_float(tmp, ranges[i], io_u_lat_u[i]);
1336 /* Millisecond latency */
1337 tmp = json_create_object();
1338 json_object_add_value_object(root, "latency_ms", tmp);
1339 for (i = 0; i < FIO_IO_U_LAT_M_NR; i++) {
1340 const char *ranges[] = { "2", "4", "10", "20", "50", "100",
1341 "250", "500", "750", "1000", "2000",
1343 json_object_add_value_float(tmp, ranges[i], io_u_lat_m[i]);
1346 /* Additional output if continue_on_error is set - default off */
1347 if (ts->continue_on_error) {
1348 json_object_add_value_int(root, "total_err", ts->total_err_count);
1349 json_object_add_value_int(root, "first_error", ts->first_error);
1352 if (ts->latency_depth) {
1353 json_object_add_value_int(root, "latency_depth", ts->latency_depth);
1354 json_object_add_value_int(root, "latency_target", ts->latency_target);
1355 json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
1356 json_object_add_value_int(root, "latency_window", ts->latency_window);
1359 /* Additional output if description is set */
1360 if (strlen(ts->description))
1361 json_object_add_value_string(root, "desc", ts->description);
1363 if (ts->nr_block_infos) {
1364 /* Block error histogram and types */
1366 unsigned int *percentiles = NULL;
1367 unsigned int block_state_counts[BLOCK_STATE_COUNT];
1369 len = calc_block_percentiles(ts->nr_block_infos, ts->block_infos,
1370 ts->percentile_list,
1371 &percentiles, block_state_counts);
1374 struct json_object *block, *percentile_object, *states;
1376 block = json_create_object();
1377 json_object_add_value_object(root, "block", block);
1379 percentile_object = json_create_object();
1380 json_object_add_value_object(block, "percentiles",
1382 for (i = 0; i < len; i++) {
1384 snprintf(buf, sizeof(buf), "%f",
1385 ts->percentile_list[i].u.f);
1386 json_object_add_value_int(percentile_object,
1391 states = json_create_object();
1392 json_object_add_value_object(block, "states", states);
1393 for (state = 0; state < BLOCK_STATE_COUNT; state++) {
1394 json_object_add_value_int(states,
1395 block_state_names[state],
1396 block_state_counts[state]);
1403 struct json_object *data;
1404 struct json_array *iops, *bw;
1408 snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
1409 ts->ss_state & FIO_SS_IOPS ? "iops" : "bw",
1410 ts->ss_state & FIO_SS_SLOPE ? "_slope" : "",
1411 (float) ts->ss_limit.u.f,
1412 ts->ss_state & FIO_SS_PCT ? "%" : "");
1414 tmp = json_create_object();
1415 json_object_add_value_object(root, "steadystate", tmp);
1416 json_object_add_value_string(tmp, "ss", ss_buf);
1417 json_object_add_value_int(tmp, "duration", (int)ts->ss_dur);
1418 json_object_add_value_int(tmp, "attained", (ts->ss_state & FIO_SS_ATTAINED) > 0);
1420 snprintf(ss_buf, sizeof(ss_buf), "%f%s", (float) ts->ss_criterion.u.f,
1421 ts->ss_state & FIO_SS_PCT ? "%" : "");
1422 json_object_add_value_string(tmp, "criterion", ss_buf);
1423 json_object_add_value_float(tmp, "max_deviation", ts->ss_deviation.u.f);
1424 json_object_add_value_float(tmp, "slope", ts->ss_slope.u.f);
1426 data = json_create_object();
1427 json_object_add_value_object(tmp, "data", data);
1428 bw = json_create_array();
1429 iops = json_create_array();
1432 ** if ss was attained or the buffer is not full,
1433 ** ss->head points to the first element in the list.
1434 ** otherwise it actually points to the second element in the list
1437 if ((ts->ss_state & FIO_SS_ATTAINED) || !(ts->ss_state & FIO_SS_BUFFER_FULL))
1440 j = ts->ss_head == 0 ? ts->ss_dur - 1 : ts->ss_head - 1;
1441 for (i = 0; i < ts->ss_dur; i++) {
1442 k = (j + i) % ts->ss_dur;
1443 json_array_add_value_int(bw, ts->ss_bw_data[k]);
1444 json_array_add_value_int(iops, ts->ss_iops_data[k]);
1446 json_object_add_value_int(data, "bw_mean", steadystate_bw_mean(ts));
1447 json_object_add_value_int(data, "iops_mean", steadystate_iops_mean(ts));
1448 json_object_add_value_array(data, "iops", iops);
1449 json_object_add_value_array(data, "bw", bw);
1455 static void show_thread_status_terse(struct thread_stat *ts,
1456 struct group_run_stats *rs,
1457 struct buf_output *out)
1459 if (terse_version >= 2 && terse_version <= 5)
1460 show_thread_status_terse_all(ts, rs, terse_version, out);
1462 log_err("fio: bad terse version!? %d\n", terse_version);
1465 struct json_object *show_thread_status(struct thread_stat *ts,
1466 struct group_run_stats *rs,
1467 struct flist_head *opt_list,
1468 struct buf_output *out)
1470 struct json_object *ret = NULL;
1472 if (output_format & FIO_OUTPUT_TERSE)
1473 show_thread_status_terse(ts, rs, out);
1474 if (output_format & FIO_OUTPUT_JSON)
1475 ret = show_thread_status_json(ts, rs, opt_list);
1476 if (output_format & FIO_OUTPUT_NORMAL)
1477 show_thread_status_normal(ts, rs, out);
1482 static void sum_stat(struct io_stat *dst, struct io_stat *src, bool first)
1486 if (src->samples == 0)
1489 dst->min_val = min(dst->min_val, src->min_val);
1490 dst->max_val = max(dst->max_val, src->max_val);
1493 * Compute new mean and S after the merge
1494 * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
1495 * #Parallel_algorithm>
1498 mean = src->mean.u.f;
1501 double delta = src->mean.u.f - dst->mean.u.f;
1503 mean = ((src->mean.u.f * src->samples) +
1504 (dst->mean.u.f * dst->samples)) /
1505 (dst->samples + src->samples);
1507 S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
1508 (dst->samples * src->samples) /
1509 (dst->samples + src->samples);
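/*
 * This is the standard parallel merge of running variances (see the
 * reference above): with delta = mean_src - mean_dst, the combined sum of
 * squared deviations is
 *
 *   S = S_src + S_dst + delta^2 * n_src * n_dst / (n_src + n_dst)
 *
 * while the combined mean is the sample-count weighted average of the two.
 */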
1512 dst->samples += src->samples;
1513 dst->mean.u.f = mean;
1517 void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
1521 for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1522 if (dst->max_run[i] < src->max_run[i])
1523 dst->max_run[i] = src->max_run[i];
1524 if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
1525 dst->min_run[i] = src->min_run[i];
1526 if (dst->max_bw[i] < src->max_bw[i])
1527 dst->max_bw[i] = src->max_bw[i];
1528 if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
1529 dst->min_bw[i] = src->min_bw[i];
1531 dst->iobytes[i] += src->iobytes[i];
1532 dst->agg[i] += src->agg[i];
1536 dst->kb_base = src->kb_base;
1537 if (!dst->unit_base)
1538 dst->unit_base = src->unit_base;
1540 dst->sig_figs = src->sig_figs;
1543 void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src,
1548 for (l = 0; l < DDIR_RWDIR_CNT; l++) {
1549 if (!dst->unified_rw_rep) {
1550 sum_stat(&dst->clat_stat[l], &src->clat_stat[l], first);
1551 sum_stat(&dst->slat_stat[l], &src->slat_stat[l], first);
1552 sum_stat(&dst->lat_stat[l], &src->lat_stat[l], first);
1553 sum_stat(&dst->bw_stat[l], &src->bw_stat[l], first);
1554 sum_stat(&dst->iops_stat[l], &src->iops_stat[l], first);
1556 dst->io_bytes[l] += src->io_bytes[l];
1558 if (dst->runtime[l] < src->runtime[l])
1559 dst->runtime[l] = src->runtime[l];
1561 sum_stat(&dst->clat_stat[0], &src->clat_stat[l], first);
1562 sum_stat(&dst->slat_stat[0], &src->slat_stat[l], first);
1563 sum_stat(&dst->lat_stat[0], &src->lat_stat[l], first);
1564 sum_stat(&dst->bw_stat[0], &src->bw_stat[l], first);
1565 sum_stat(&dst->iops_stat[0], &src->iops_stat[l], first);
1567 dst->io_bytes[0] += src->io_bytes[l];
1569 if (dst->runtime[0] < src->runtime[l])
1570 dst->runtime[0] = src->runtime[l];
1573 * We're summing to the same destination, so override
1574 * 'first' after the first iteration of the loop
1580 sum_stat(&dst->sync_stat, &src->sync_stat, first);
1581 dst->usr_time += src->usr_time;
1582 dst->sys_time += src->sys_time;
1583 dst->ctx += src->ctx;
1584 dst->majf += src->majf;
1585 dst->minf += src->minf;
1587 for (k = 0; k < FIO_IO_U_MAP_NR; k++) {
1588 dst->io_u_map[k] += src->io_u_map[k];
1589 dst->io_u_submit[k] += src->io_u_submit[k];
1590 dst->io_u_complete[k] += src->io_u_complete[k];
1592 for (k = 0; k < FIO_IO_U_LAT_N_NR; k++) {
1593 dst->io_u_lat_n[k] += src->io_u_lat_n[k];
1594 dst->io_u_lat_u[k] += src->io_u_lat_u[k];
1595 dst->io_u_lat_m[k] += src->io_u_lat_m[k];
1597 for (k = 0; k < FIO_IO_U_PLAT_NR; k++)
1598 dst->io_u_sync_plat[k] += src->io_u_sync_plat[k];
1600 for (k = 0; k < DDIR_RWDIR_CNT; k++) {
1601 if (!dst->unified_rw_rep) {
1602 dst->total_io_u[k] += src->total_io_u[k];
1603 dst->short_io_u[k] += src->short_io_u[k];
1604 dst->drop_io_u[k] += src->drop_io_u[k];
1606 dst->total_io_u[0] += src->total_io_u[k];
1607 dst->short_io_u[0] += src->short_io_u[k];
1608 dst->drop_io_u[0] += src->drop_io_u[k];
1612 dst->total_io_u[DDIR_SYNC] += src->total_io_u[DDIR_SYNC];
1614 for (k = 0; k < DDIR_RWDIR_CNT; k++) {
1617 for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
1618 if (!dst->unified_rw_rep)
1619 dst->io_u_plat[k][m] += src->io_u_plat[k][m];
1621 dst->io_u_plat[0][m] += src->io_u_plat[k][m];
1625 dst->total_run_time += src->total_run_time;
1626 dst->total_submit += src->total_submit;
1627 dst->total_complete += src->total_complete;
1630 void init_group_run_stat(struct group_run_stats *gs)
1633 memset(gs, 0, sizeof(*gs));
1635 for (i = 0; i < DDIR_RWDIR_CNT; i++)
1636 gs->min_bw[i] = gs->min_run[i] = ~0UL;
1639 void init_thread_stat(struct thread_stat *ts)
1643 memset(ts, 0, sizeof(*ts));
1645 for (j = 0; j < DDIR_RWDIR_CNT; j++) {
1646 ts->lat_stat[j].min_val = -1UL;
1647 ts->clat_stat[j].min_val = -1UL;
1648 ts->slat_stat[j].min_val = -1UL;
1649 ts->bw_stat[j].min_val = -1UL;
1650 ts->iops_stat[j].min_val = -1UL;
1652 ts->sync_stat.min_val = -1UL;
1656 void __show_run_stats(void)
1658 struct group_run_stats *runstats, *rs;
1659 struct thread_data *td;
1660 struct thread_stat *threadstats, *ts;
1661 int i, j, k, nr_ts, last_ts, idx;
1662 bool kb_base_warned = false;
1663 bool unit_base_warned = false;
1664 struct json_object *root = NULL;
1665 struct json_array *array = NULL;
1666 struct buf_output output[FIO_OUTPUT_NR];
1667 struct flist_head **opt_lists;
1669 runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));
1671 for (i = 0; i < groupid + 1; i++)
1672 init_group_run_stat(&runstats[i]);
1675 * Find out how many thread stats we need. If group reporting isn't
1676 * enabled, it's one per td.
1680 for_each_td(td, i) {
1681 if (!td->o.group_reporting) {
1685 if (last_ts == td->groupid)
1690 last_ts = td->groupid;
1694 threadstats = malloc(nr_ts * sizeof(struct thread_stat));
1695 opt_lists = malloc(nr_ts * sizeof(struct flist_head *));
1697 for (i = 0; i < nr_ts; i++) {
1698 init_thread_stat(&threadstats[i]);
1699 opt_lists[i] = NULL;
1705 for_each_td(td, i) {
1708 if (idx && (!td->o.group_reporting ||
1709 (td->o.group_reporting && last_ts != td->groupid))) {
1714 last_ts = td->groupid;
1716 ts = &threadstats[j];
1718 ts->clat_percentiles = td->o.clat_percentiles;
1719 ts->lat_percentiles = td->o.lat_percentiles;
1720 ts->percentile_precision = td->o.percentile_precision;
1721 memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));
1722 opt_lists[j] = &td->opt_list;
1727 if (ts->groupid == -1) {
1729 * These are per-group shared already
1731 strncpy(ts->name, td->o.name, FIO_JOBNAME_SIZE - 1);
1732 if (td->o.description)
1733 strncpy(ts->description, td->o.description,
1734 FIO_JOBDESC_SIZE - 1);
1736 memset(ts->description, 0, FIO_JOBDESC_SIZE);
1739 * If multiple entries in this group, this is the first member.
1742 ts->thread_number = td->thread_number;
1743 ts->groupid = td->groupid;
1746 * first pid in group, not very useful...
1750 ts->kb_base = td->o.kb_base;
1751 ts->unit_base = td->o.unit_base;
1752 ts->sig_figs = td->o.sig_figs;
1753 ts->unified_rw_rep = td->o.unified_rw_rep;
1754 } else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
1755 log_info("fio: kb_base differs for jobs in group, using"
1756 " %u as the base\n", ts->kb_base);
1757 kb_base_warned = true;
1758 } else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
1759 log_info("fio: unit_base differs for jobs in group, using"
1760 " %u as the base\n", ts->unit_base);
1761 unit_base_warned = true;
1764 ts->continue_on_error = td->o.continue_on_error;
1765 ts->total_err_count += td->total_err_count;
1766 ts->first_error = td->first_error;
1768 if (!td->error && td->o.continue_on_error &&
1770 ts->error = td->first_error;
1771 ts->verror[sizeof(ts->verror) - 1] = '\0';
1772 strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
1773 } else if (td->error) {
1774 ts->error = td->error;
1775 ts->verror[sizeof(ts->verror) - 1] = '\0';
1776 strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
1780 ts->latency_depth = td->latency_qd;
1781 ts->latency_target = td->o.latency_target;
1782 ts->latency_percentile = td->o.latency_percentile;
1783 ts->latency_window = td->o.latency_window;
1785 ts->nr_block_infos = td->ts.nr_block_infos;
1786 for (k = 0; k < ts->nr_block_infos; k++)
1787 ts->block_infos[k] = td->ts.block_infos[k];
1789 sum_thread_stats(ts, &td->ts, idx == 1);
1792 ts->ss_state = td->ss.state;
1793 ts->ss_dur = td->ss.dur;
1794 ts->ss_head = td->ss.head;
1795 ts->ss_bw_data = td->ss.bw_data;
1796 ts->ss_iops_data = td->ss.iops_data;
1797 ts->ss_limit.u.f = td->ss.limit;
1798 ts->ss_slope.u.f = td->ss.slope;
1799 ts->ss_deviation.u.f = td->ss.deviation;
1800 ts->ss_criterion.u.f = td->ss.criterion;
1803 ts->ss_dur = ts->ss_state = 0;
1806 for (i = 0; i < nr_ts; i++) {
1807 unsigned long long bw;
1809 ts = &threadstats[i];
1810 if (ts->groupid == -1)
1812 rs = &runstats[ts->groupid];
1813 rs->kb_base = ts->kb_base;
1814 rs->unit_base = ts->unit_base;
1815 rs->sig_figs = ts->sig_figs;
1816 rs->unified_rw_rep += ts->unified_rw_rep;
1818 for (j = 0; j < DDIR_RWDIR_CNT; j++) {
1819 if (!ts->runtime[j])
1821 if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
1822 rs->min_run[j] = ts->runtime[j];
1823 if (ts->runtime[j] > rs->max_run[j])
1824 rs->max_run[j] = ts->runtime[j];
1828 bw = ts->io_bytes[j] * 1000 / ts->runtime[j];
1829 if (bw < rs->min_bw[j])
1831 if (bw > rs->max_bw[j])
1834 rs->iobytes[j] += ts->io_bytes[j];
1838 for (i = 0; i < groupid + 1; i++) {
1843 for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
1844 if (rs->max_run[ddir])
1845 rs->agg[ddir] = (rs->iobytes[ddir] * 1000) /
1850 for (i = 0; i < FIO_OUTPUT_NR; i++)
1851 buf_output_init(&output[i]);
1854 * don't overwrite last signal output
1856 if (output_format & FIO_OUTPUT_NORMAL)
1857 log_buf(&output[__FIO_OUTPUT_NORMAL], "\n");
1858 if (output_format & FIO_OUTPUT_JSON) {
1859 struct thread_data *global;
1862 unsigned long long ms_since_epoch;
1865 gettimeofday(&now, NULL);
1866 ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
1867 (unsigned long long)(now.tv_usec) / 1000;
1869 tv_sec = now.tv_sec;
1870 os_ctime_r(&tv_sec, time_buf, sizeof(time_buf));
1871 if (time_buf[strlen(time_buf) - 1] == '\n')
1872 time_buf[strlen(time_buf) - 1] = '\0';
1874 root = json_create_object();
1875 json_object_add_value_string(root, "fio version", fio_version_string);
1876 json_object_add_value_int(root, "timestamp", now.tv_sec);
1877 json_object_add_value_int(root, "timestamp_ms", ms_since_epoch);
1878 json_object_add_value_string(root, "time", time_buf);
1879 global = get_global_options();
1880 json_add_job_opts(root, "global options", &global->opt_list);
1881 array = json_create_array();
1882 json_object_add_value_array(root, "jobs", array);
1886 fio_server_send_job_options(&get_global_options()->opt_list, -1U);
1888 for (i = 0; i < nr_ts; i++) {
1889 ts = &threadstats[i];
1890 rs = &runstats[ts->groupid];
1893 fio_server_send_job_options(opt_lists[i], i);
1894 fio_server_send_ts(ts, rs);
1896 if (output_format & FIO_OUTPUT_TERSE)
1897 show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]);
1898 if (output_format & FIO_OUTPUT_JSON) {
1899 struct json_object *tmp = show_thread_status_json(ts, rs, opt_lists[i]);
1900 json_array_add_value_object(array, tmp);
1902 if (output_format & FIO_OUTPUT_NORMAL)
1903 show_thread_status_normal(ts, rs, &output[__FIO_OUTPUT_NORMAL]);
1906 if (!is_backend && (output_format & FIO_OUTPUT_JSON)) {
1907 /* disk util stats, if any */
1908 show_disk_util(1, root, &output[__FIO_OUTPUT_JSON]);
1910 show_idle_prof_stats(FIO_OUTPUT_JSON, root, &output[__FIO_OUTPUT_JSON]);
1912 json_print_object(root, &output[__FIO_OUTPUT_JSON]);
1913 log_buf(&output[__FIO_OUTPUT_JSON], "\n");
1914 json_free_object(root);
1917 for (i = 0; i < groupid + 1; i++) {
1922 fio_server_send_gs(rs);
1923 else if (output_format & FIO_OUTPUT_NORMAL)
1924 show_group_stats(rs, &output[__FIO_OUTPUT_NORMAL]);
1928 fio_server_send_du();
1929 else if (output_format & FIO_OUTPUT_NORMAL) {
1930 show_disk_util(0, NULL, &output[__FIO_OUTPUT_NORMAL]);
1931 show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, &output[__FIO_OUTPUT_NORMAL]);
1934 for (i = 0; i < FIO_OUTPUT_NR; i++) {
1935 struct buf_output *out = &output[i];
1937 log_info_buf(out->buf, out->buflen);
1938 buf_output_free(out);
1947 void show_run_stats(void)
1949 fio_sem_down(stat_sem);
1951 fio_sem_up(stat_sem);
1954 void __show_running_run_stats(void)
1956 struct thread_data *td;
1957 unsigned long long *rt;
1961 fio_sem_down(stat_sem);
1963 rt = malloc(thread_number * sizeof(unsigned long long));
1964 fio_gettime(&ts, NULL);
1966 for_each_td(td, i) {
1967 td->update_rusage = 1;
1968 td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
1969 td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
1970 td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
1971 td->ts.total_run_time = mtime_since(&td->epoch, &ts);
1973 rt[i] = mtime_since(&td->start, &ts);
1974 if (td_read(td) && td->ts.io_bytes[DDIR_READ])
1975 td->ts.runtime[DDIR_READ] += rt[i];
1976 if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
1977 td->ts.runtime[DDIR_WRITE] += rt[i];
1978 if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
1979 td->ts.runtime[DDIR_TRIM] += rt[i];
1982 for_each_td(td, i) {
1983 if (td->runstate >= TD_EXITED)
1985 if (td->rusage_sem) {
1986 td->update_rusage = 1;
1987 fio_sem_down(td->rusage_sem);
1989 td->update_rusage = 0;
1994 for_each_td(td, i) {
1995 if (td_read(td) && td->ts.io_bytes[DDIR_READ])
1996 td->ts.runtime[DDIR_READ] -= rt[i];
1997 if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
1998 td->ts.runtime[DDIR_WRITE] -= rt[i];
1999 if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
2000 td->ts.runtime[DDIR_TRIM] -= rt[i];
2004 fio_sem_up(stat_sem);
2007 static bool status_interval_init;
2008 static struct timespec status_time;
2009 static bool status_file_disabled;
2011 #define FIO_STATUS_FILE "fio-dump-status"
2013 static int check_status_file(void)
2016 const char *temp_dir;
2017 char fio_status_file_path[PATH_MAX];
2019 if (status_file_disabled)
2022 temp_dir = getenv("TMPDIR");
2023 if (temp_dir == NULL) {
2024 temp_dir = getenv("TEMP");
2025 if (temp_dir && strlen(temp_dir) >= PATH_MAX)
2028 if (temp_dir == NULL)
2031 snprintf(fio_status_file_path, sizeof(fio_status_file_path), "%s/%s", temp_dir, FIO_STATUS_FILE);
2033 if (stat(fio_status_file_path, &sb))
2036 if (unlink(fio_status_file_path) < 0) {
2037 log_err("fio: failed to unlink %s: %s\n", fio_status_file_path,
2039 log_err("fio: disabling status file updates\n");
2040 status_file_disabled = true;
2046 void check_for_running_stats(void)
2048 if (status_interval) {
2049 if (!status_interval_init) {
2050 fio_gettime(&status_time, NULL);
2051 status_interval_init = true;
2052 } else if (mtime_since_now(&status_time) >= status_interval) {
2053 show_running_run_stats();
2054 fio_gettime(&status_time, NULL);
2058 if (check_status_file()) {
2059 show_running_run_stats();
2064 static inline void add_stat_sample(struct io_stat *is, unsigned long long data)
2069 if (data > is->max_val)
2071 if (data < is->min_val)
2074 delta = val - is->mean.u.f;
2076 is->mean.u.f += delta / (is->samples + 1.0);
2077 is->S.u.f += delta * (val - is->mean.u.f);
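/*
 * This is Welford's online update: the mean and S (running sum of squared
 * deviations) advance one sample at a time, which lets calc_lat() derive
 * the mean and standard deviation later without storing individual samples.
 */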
2084 * Return a struct io_logs, which is added to the tail of the log list for this iolog.
2087 static struct io_logs *get_new_log(struct io_log *iolog)
2089 size_t new_size, new_samples;
2090 struct io_logs *cur_log;
2093 * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling the allocation without bound.
2096 if (!iolog->cur_log_max)
2097 new_samples = DEF_LOG_ENTRIES;
2099 new_samples = iolog->cur_log_max * 2;
2100 if (new_samples > MAX_LOG_ENTRIES)
2101 new_samples = MAX_LOG_ENTRIES;
2104 new_size = new_samples * log_entry_sz(iolog);
2106 cur_log = smalloc(sizeof(*cur_log));
2108 INIT_FLIST_HEAD(&cur_log->list);
2109 cur_log->log = malloc(new_size);
2111 cur_log->nr_samples = 0;
2112 cur_log->max_samples = new_samples;
2113 flist_add_tail(&cur_log->list, &iolog->io_logs);
2114 iolog->cur_log_max = new_samples;
2124 * Add and return a new log chunk, or return current log if big enough
2126 static struct io_logs *regrow_log(struct io_log *iolog)
2128 struct io_logs *cur_log;
2131 if (!iolog || iolog->disabled)
2134 cur_log = iolog_cur_log(iolog);
2136 cur_log = get_new_log(iolog);
2141 if (cur_log->nr_samples < cur_log->max_samples)
2145 * No room for a new sample. If we're compressing on the fly, flush
2146 * out the current chunk
2148 if (iolog->log_gz) {
2149 if (iolog_cur_flush(iolog, cur_log)) {
2150 log_err("fio: failed flushing iolog! Will stop logging.\n");
2156 * Get a new log array, and add to our list
2158 cur_log = get_new_log(iolog);
2160 log_err("fio: failed extending iolog! Will stop logging.\n");
2164 if (!iolog->pending || !iolog->pending->nr_samples)
2168 * Flush pending items to new log
2170 for (i = 0; i < iolog->pending->nr_samples; i++) {
2171 struct io_sample *src, *dst;
2173 src = get_sample(iolog, iolog->pending, i);
2174 dst = get_sample(iolog, cur_log, i);
2175 memcpy(dst, src, log_entry_sz(iolog));
2177 cur_log->nr_samples = iolog->pending->nr_samples;
2179 iolog->pending->nr_samples = 0;
2183 iolog->disabled = true;
2187 void regrow_logs(struct thread_data *td)
2189 regrow_log(td->slat_log);
2190 regrow_log(td->clat_log);
2191 regrow_log(td->clat_hist_log);
2192 regrow_log(td->lat_log);
2193 regrow_log(td->bw_log);
2194 regrow_log(td->iops_log);
2195 td->flags &= ~TD_F_REGROW_LOGS;
2198 static struct io_logs *get_cur_log(struct io_log *iolog)
2200 struct io_logs *cur_log;
2202 cur_log = iolog_cur_log(iolog);
2204 cur_log = get_new_log(iolog);
2209 if (cur_log->nr_samples < cur_log->max_samples)
2213 * Out of space. If we're in IO offload mode, or we're not doing
2214 * per unit logging (hence logging happens outside of the IO thread
2215 * as well), add a new log chunk inline. If we're doing inline
2216 * submissions, flag 'td' as needing a log regrow and we'll take
2217 * care of it on the submission side.
2219 if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD ||
2220 !per_unit_log(iolog))
2221 return regrow_log(iolog);
2223 iolog->td->flags |= TD_F_REGROW_LOGS;
2224 assert(iolog->pending->nr_samples < iolog->pending->max_samples);
2225 return iolog->pending;
static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
			     enum fio_ddir ddir, unsigned int bs,
			     unsigned long t, uint64_t offset)
{
	struct io_logs *cur_log;

	if (iolog->disabled)
		return;
	if (flist_empty(&iolog->io_logs))
		iolog->avg_last[ddir] = t;

	cur_log = get_cur_log(iolog);
	if (cur_log) {
		struct io_sample *s;

		s = get_sample(iolog, cur_log, cur_log->nr_samples);

		s->data = data;
		s->time = t + (iolog->td ? iolog->td->unix_epoch : 0);
		io_sample_set_ddir(iolog, s, ddir);
		s->bs = bs;

		if (iolog->log_offset) {
			struct io_sample_offset *so = (void *) s;

			so->offset = offset;
		}

		cur_log->nr_samples++;
		return;
	}

	iolog->disabled = true;
}
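/*
 * Layout note: each slot in a log chunk is log_entry_sz(iolog) bytes,
 * which is either a plain struct io_sample or the larger struct
 * io_sample_offset when offset logging is enabled. That is why samples
 * are always addressed through get_sample() instead of indexing an
 * array of struct io_sample directly.
 */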
static inline void reset_io_stat(struct io_stat *ios)
{
	ios->max_val = ios->min_val = ios->samples = 0;
	ios->mean.u.f = ios->S.u.f = 0;
}
void reset_io_stats(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;
	int i, j;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		reset_io_stat(&ts->clat_stat[i]);
		reset_io_stat(&ts->slat_stat[i]);
		reset_io_stat(&ts->lat_stat[i]);
		reset_io_stat(&ts->bw_stat[i]);
		reset_io_stat(&ts->iops_stat[i]);

		ts->io_bytes[i] = 0;
		ts->runtime[i] = 0;
		ts->total_io_u[i] = 0;
		ts->short_io_u[i] = 0;
		ts->drop_io_u[i] = 0;

		for (j = 0; j < FIO_IO_U_PLAT_NR; j++) {
			ts->io_u_plat[i][j] = 0;
			if (!i)
				ts->io_u_sync_plat[j] = 0;
		}
	}

	ts->total_io_u[DDIR_SYNC] = 0;

	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
		ts->io_u_map[i] = 0;
		ts->io_u_submit[i] = 0;
		ts->io_u_complete[i] = 0;
	}

	for (i = 0; i < FIO_IO_U_LAT_N_NR; i++)
		ts->io_u_lat_n[i] = 0;
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		ts->io_u_lat_u[i] = 0;
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		ts->io_u_lat_m[i] = 0;

	ts->total_submit = 0;
	ts->total_complete = 0;
}
static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
			      unsigned long elapsed, bool log_max)
{
	/*
	 * Note an entry in the log. Use the mean from the logged samples,
	 * making sure to properly round up. Only write a log entry if we
	 * had actual samples done.
	 */
	if (iolog->avg_window[ddir].samples) {
		union io_sample_data data;

		if (log_max)
			data.val = iolog->avg_window[ddir].max_val;
		else
			data.val = iolog->avg_window[ddir].mean.u.f + 0.50;

		__add_log_sample(iolog, data, ddir, 0, elapsed, 0);
	}

	reset_io_stat(&iolog->avg_window[ddir]);
}
static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
			     bool log_max)
{
	int ddir;

	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
		__add_stat_to_log(iolog, ddir, elapsed, log_max);
}
static unsigned long add_log_sample(struct thread_data *td,
				    struct io_log *iolog,
				    union io_sample_data data,
				    enum fio_ddir ddir, unsigned int bs,
				    uint64_t offset)
{
	unsigned long elapsed, this_window;

	if (!ddir_rw(ddir))
		return 0;

	elapsed = mtime_since_now(&td->epoch);

	/*
	 * If no time averaging, just add the log sample.
	 */
	if (!iolog->avg_msec) {
		__add_log_sample(iolog, data, ddir, bs, elapsed, offset);
		return 0;
	}

	/*
	 * Add the sample. If the time period has passed, then
	 * add that entry to the log and clear.
	 */
	add_stat_sample(&iolog->avg_window[ddir], data.val);

	/*
	 * If period hasn't passed, adding the above sample is all we
	 * need to do.
	 */
	this_window = elapsed - iolog->avg_last[ddir];
	if (elapsed < iolog->avg_last[ddir])
		return iolog->avg_last[ddir] - elapsed;
	else if (this_window < iolog->avg_msec) {
		unsigned long diff = iolog->avg_msec - this_window;

		if (inline_log(iolog) || diff > LOG_MSEC_SLACK)
			return diff;
	}

	__add_stat_to_log(iolog, ddir, elapsed, td->o.log_max != 0);

	iolog->avg_last[ddir] = elapsed - (this_window - iolog->avg_msec);
	return iolog->avg_msec;
}
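/*
 * Worked example of the windowing above (hypothetical numbers, with
 * avg_msec = 500): if avg_last[ddir] = 1000 and a sample arrives at
 * elapsed = 1300, this_window = 300 and diff = 200 > LOG_MSEC_SLACK, so
 * the sample is only folded into avg_window and the caller is asked to
 * come back in 200 msec. A sample at elapsed = 1520 gives
 * this_window = 520 >= 500, so the window mean (or max) is written out
 * and avg_last becomes 1520 - (520 - 500) = 1500, keeping the windows
 * aligned on 500 msec boundaries despite the 20 msec of drift.
 */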
void finalize_logs(struct thread_data *td, bool unit_logs)
{
	unsigned long elapsed;

	elapsed = mtime_since_now(&td->epoch);

	if (td->clat_log && unit_logs)
		_add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0);
	if (td->slat_log && unit_logs)
		_add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0);
	if (td->lat_log && unit_logs)
		_add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0);
	if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
		_add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0);
	if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
		_add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
}
void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned int bs)
{
	struct io_log *iolog;

	if (!ddir_rw(ddir))
		return;

	iolog = agg_io_log[ddir];
	__add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0);
}
void add_sync_clat_sample(struct thread_stat *ts, unsigned long long nsec)
{
	unsigned int idx = plat_val_to_idx(nsec);
	assert(idx < FIO_IO_U_PLAT_NR);

	ts->io_u_sync_plat[idx]++;
	add_stat_sample(&ts->sync_stat, nsec);
}
static void add_clat_percentile_sample(struct thread_stat *ts,
				       unsigned long long nsec, enum fio_ddir ddir)
{
	unsigned int idx = plat_val_to_idx(nsec);
	assert(idx < FIO_IO_U_PLAT_NR);

	ts->io_u_plat[ddir][idx]++;
}
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long long nsec, unsigned int bs, uint64_t offset)
{
	unsigned long elapsed, this_window;
	struct thread_stat *ts = &td->ts;
	struct io_log *iolog = td->clat_hist_log;

	td_io_u_lock(td);

	add_stat_sample(&ts->clat_stat[ddir], nsec);

	if (td->clat_log)
		add_log_sample(td, td->clat_log, sample_val(nsec), ddir, bs,
			       offset);

	if (ts->clat_percentiles)
		add_clat_percentile_sample(ts, nsec, ddir);

	if (iolog && iolog->hist_msec) {
		struct io_hist *hw = &iolog->hist_window[ddir];

		hw->samples++;
		elapsed = mtime_since_now(&td->epoch);
		if (!hw->hist_last)
			hw->hist_last = elapsed;
		this_window = elapsed - hw->hist_last;

		if (this_window >= iolog->hist_msec) {
			uint64_t *io_u_plat;
			struct io_u_plat_entry *dst;

			/*
			 * Make a byte-for-byte copy of the latency histogram
			 * stored in td->ts.io_u_plat[ddir], recording it in a
			 * log sample. Note that the matching call to free() is
			 * located in iolog.c after printing this sample to the
			 * log file. The copy size must match the 64-bit bucket
			 * counters, or only part of the histogram is copied.
			 */
			io_u_plat = (uint64_t *) td->ts.io_u_plat[ddir];
			dst = malloc(sizeof(struct io_u_plat_entry));
			memcpy(&(dst->io_u_plat), io_u_plat,
				FIO_IO_U_PLAT_NR * sizeof(dst->io_u_plat[0]));
			flist_add(&dst->list, &hw->list);
			__add_log_sample(iolog, sample_plat(dst), ddir, bs,
						elapsed, offset);

			/*
			 * Update the last time we recorded as being now, minus
			 * any drift in time we encountered before actually
			 * making the record.
			 */
			hw->hist_last = elapsed - (this_window - iolog->hist_msec);
			hw->samples = 0;
		}
	}

	td_io_u_unlock(td);
}
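/*
 * The histogram log therefore records a snapshot of the cumulative
 * io_u_plat[] buckets at most once per hist_msec window and data
 * direction; consecutive snapshots are meant to be differenced (by the
 * log writer in iolog.c) to recover the per-window distribution. The
 * drift correction applied to hist_last mirrors the one used for the
 * averaged value logs in add_log_sample().
 */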
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long usec, unsigned int bs, uint64_t offset)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	td_io_u_lock(td);

	add_stat_sample(&ts->slat_stat[ddir], usec);

	if (td->slat_log)
		add_log_sample(td, td->slat_log, sample_val(usec), ddir, bs, offset);

	td_io_u_unlock(td);
}
void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
		    unsigned long long nsec, unsigned int bs, uint64_t offset)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	td_io_u_lock(td);

	add_stat_sample(&ts->lat_stat[ddir], nsec);

	if (td->lat_log)
		add_log_sample(td, td->lat_log, sample_val(nsec), ddir, bs,
			       offset);

	if (ts->lat_percentiles)
		add_clat_percentile_sample(ts, nsec, ddir);

	td_io_u_unlock(td);
}
void add_bw_sample(struct thread_data *td, struct io_u *io_u,
		   unsigned int bytes, unsigned long long spent)
{
	struct thread_stat *ts = &td->ts;
	unsigned long rate;

	if (spent)
		rate = (unsigned long) (bytes * 1000000ULL / spent);
	else
		rate = 0;

	td_io_u_lock(td);

	add_stat_sample(&ts->bw_stat[io_u->ddir], rate);

	if (td->bw_log)
		add_log_sample(td, td->bw_log, sample_val(rate), io_u->ddir,
			       bytes, io_u->offset);

	td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir];
	td_io_u_unlock(td);
}
static int __add_samples(struct thread_data *td, struct timespec *parent_tv,
			 struct timespec *t, unsigned int avg_time,
			 uint64_t *this_io_bytes, uint64_t *stat_io_bytes,
			 struct io_stat *stat, struct io_log *log,
			 bool is_kb_base)
{
	unsigned long spent, rate;
	enum fio_ddir ddir;
	unsigned long next, next_log;

	next_log = avg_time;

	spent = mtime_since(parent_tv, t);
	if (spent < avg_time && avg_time - spent >= LOG_MSEC_SLACK)
		return avg_time - spent;

	td_io_u_lock(td);

	/*
	 * Compute both read and write rates for the interval.
	 */
	for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
		uint64_t delta;

		delta = this_io_bytes[ddir] - stat_io_bytes[ddir];
		if (!delta)
			continue; /* No entries for interval */

		if (spent) {
			if (is_kb_base)
				rate = delta * 1000 / spent / 1024; /* KiB/s */
			else
				rate = (delta * 1000) / spent;
		} else
			rate = 0;

		add_stat_sample(&stat[ddir], rate);

		if (log) {
			unsigned int bs = 0;

			if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
				bs = td->o.min_bs[ddir];

			next = add_log_sample(td, log, sample_val(rate), ddir, bs, 0);
			next_log = min(next_log, next);
		}

		stat_io_bytes[ddir] = this_io_bytes[ddir];
	}

	timespec_add_msec(parent_tv, avg_time);

	td_io_u_unlock(td);

	if (spent <= avg_time)
		next = avg_time;
	else
		next = avg_time - (1 + spent - avg_time);

	return min(next, next_log);
}
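/*
 * Rate math above, with illustrative numbers: 'spent' is in msec, so
 * moving 5242880 bytes in a 500 msec window gives
 * 5242880 * 1000 / 500 / 1024 = 10240 KiB/s (10 MiB/s) in the bandwidth
 * case, while the IOPS case uses the same formula without the final
 * division by 1024 to turn a block-count delta into ops/sec.
 */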
static int add_bw_samples(struct thread_data *td, struct timespec *t)
{
	return __add_samples(td, &td->bw_sample_time, t, td->o.bw_avg_time,
			     td->this_io_bytes, td->stat_io_bytes,
			     td->ts.bw_stat, td->bw_log, true);
}
void add_iops_sample(struct thread_data *td, struct io_u *io_u,
		     unsigned int bytes)
{
	struct thread_stat *ts = &td->ts;

	td_io_u_lock(td);

	add_stat_sample(&ts->iops_stat[io_u->ddir], 1);

	if (td->iops_log)
		add_log_sample(td, td->iops_log, sample_val(1), io_u->ddir,
			       bytes, io_u->offset);

	td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];
	td_io_u_unlock(td);
}
static int add_iops_samples(struct thread_data *td, struct timespec *t)
{
	return __add_samples(td, &td->iops_sample_time, t, td->o.iops_avg_time,
			     td->this_io_blocks, td->stat_io_blocks,
			     td->ts.iops_stat, td->iops_log, false);
}
/*
 * Returns msecs to next event
 */
int calc_log_samples(void)
{
	struct thread_data *td;
	unsigned int next = ~0U, tmp;
	struct timespec now;
	int i;

	fio_gettime(&now, NULL);

	for_each_td(td, i) {
		if (in_ramp_time(td) ||
		    !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
			next = min(td->o.iops_avg_time, td->o.bw_avg_time);
			continue;
		}
		if (!td->bw_log ||
		    (td->bw_log && !per_unit_log(td->bw_log))) {
			tmp = add_bw_samples(td, &now);
			if (tmp < next)
				next = tmp;
		}
		if (!td->iops_log ||
		    (td->iops_log && !per_unit_log(td->iops_log))) {
			tmp = add_iops_samples(td, &now);
			if (tmp < next)
				next = tmp;
		}
	}

	return next == ~0U ? 0 : next;
}
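/*
 * calc_log_samples() is intended to be driven by the helper thread: the
 * returned value is used as the number of msecs to sleep before the
 * next bw/iops sampling pass, with 0 meaning there is nothing to wait
 * for right now.
 */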
void stat_init(void)
{
	stat_sem = fio_sem_init(FIO_SEM_UNLOCKED);
}
void stat_exit(void)
{
	/*
	 * When we hold the stat semaphore, we know that out-of-band access
	 * to it has ended.
	 */
	fio_sem_down(stat_sem);
	fio_sem_remove(stat_sem);
}
/*
 * Called from signal handler. Wake up status thread.
 */
void show_running_run_stats(void)
{
	helper_do_stat();
}
uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u)
{
	/*
	 * Ignore io_u's that span multiple blocks; they will just get
	 * inaccurate counts.
	 */
	int idx = (io_u->offset - io_u->file->file_offset)
			/ td->o.bs[DDIR_TRIM];
	uint32_t *info = &td->ts.block_infos[idx];

	assert(idx < td->ts.nr_block_infos);
	return info;
}