#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <math.h>

#include "fio.h"
#include "diskutil.h"
#include "lib/ieee754.h"
#include "json.h"
#include "lib/getrusage.h"
#include "idletime.h"

static struct fio_mutex *stat_mutex;
void update_rusage_stat(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;

	fio_getrusage(&td->ru_end);
	ts->usr_time += mtime_since(&td->ru_start.ru_utime,
					&td->ru_end.ru_utime);
	ts->sys_time += mtime_since(&td->ru_start.ru_stime,
					&td->ru_end.ru_stime);
	ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
			- (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
	ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
	ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;

	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
}
/*
 * Given a latency, return the index of the corresponding bucket in
 * the structure tracking percentiles.
 *
 * (1) find the group (and error bits) that the value (latency)
 * belongs to by looking at its MSB. (2) find the bucket number in the
 * group by looking at the index bits.
 */
static unsigned int plat_val_to_idx(unsigned int val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Find MSB starting from bit 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val) * 8) - __builtin_clz(val) - 1;

	/*
	 * When the MSB is at most FIO_IO_U_PLAT_BITS, the value cannot be
	 * rounded off: use all bits of the sample as the index.
	 */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return val;

	/* Compute the number of error bits to discard */
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* Compute the number of buckets before the group */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/*
	 * Discard the error bits and apply the mask to find the
	 * index for the buckets in the group
	 */
	offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

	/* Make sure the index does not exceed (array size - 1) */
	idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
		(base + offset) : (FIO_IO_U_PLAT_NR - 1);

	return idx;
}
/*
 * Convert the given index of the bucket array to the value
 * represented by the bucket
 */
static unsigned int plat_idx_to_val(unsigned int idx)
{
	unsigned int error_bits, k, base;

	assert(idx < FIO_IO_U_PLAT_NR);

	/*
	 * Indexes in the first two groups carry the sample value itself:
	 * nothing was rounded off, so return the index as-is.
	 */
	if (idx < (FIO_IO_U_PLAT_VAL << 1))
		return idx;

	/* Find the group and compute the minimum value of that group */
	error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
	base = 1 << (error_bits + FIO_IO_U_PLAT_BITS);

	/* Find the bucket number within the group */
	k = idx % FIO_IO_U_PLAT_VAL;

	/* Return the mean of the range of the bucket */
	return base + ((k + 0.5) * (1 << error_bits));
}
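/*
 * Worked example of the two mappings above, assuming the usual
 * FIO_IO_U_PLAT_BITS == 6 (so FIO_IO_U_PLAT_VAL == 64): for val = 1000
 * (binary 1111101000), msb = 9, error_bits = 9 - 6 = 3, base =
 * (3 + 1) << 6 = 256 and offset = (1000 >> 3) & 63 = 61, giving idx = 317.
 * Going back, plat_idx_to_val(317) computes error_bits = 3, base = 512
 * and k = 61, i.e. 512 + 61.5 * 8 = 1004: the midpoint of the 8-wide
 * bucket [1000, 1008) that the sample landed in.
 */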
static int double_cmp(const void *a, const void *b)
{
	const fio_fp64_t fa = *(const fio_fp64_t *) a;
	const fio_fp64_t fb = *(const fio_fp64_t *) b;
	int cmp = 0;

	if (fa.u.f > fb.u.f)
		cmp = 1;
	else if (fa.u.f < fb.u.f)
		cmp = -1;

	return cmp;
}
unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
				   fio_fp64_t *plist, unsigned int **output,
				   unsigned int *maxv, unsigned int *minv)
{
	unsigned long sum = 0;
	unsigned int len, i, j = 0;
	unsigned int oval_len = 0;
	unsigned int *ovals = NULL;
	int is_last;

	*minv = -1U;
	*maxv = 0;

	len = 0;
	while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
		len++;

	if (!len)
		return 0;

	/*
	 * Sort the percentile list. Note that it may already be sorted if
	 * we are using the default values, but since it's a short list this
	 * isn't a worry. Also note that this does not work for NaN values.
	 */
	if (len > 1)
		qsort((void *)plist, len, sizeof(plist[0]), double_cmp);

	/*
	 * Calculate bucket values, note down max and min values
	 */
	is_last = 0;
	for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
		sum += io_u_plat[i];
		while (sum >= (plist[j].u.f / 100.0 * nr)) {
			assert(plist[j].u.f <= 100.0);

			if (j == oval_len) {
				oval_len += 100;
				ovals = realloc(ovals, oval_len * sizeof(unsigned int));
			}

			ovals[j] = plat_idx_to_val(i);
			if (ovals[j] < *minv)
				*minv = ovals[j];
			if (ovals[j] > *maxv)
				*maxv = ovals[j];

			is_last = (j == len - 1);
			if (is_last)
				break;

			j++;
		}
	}

	*output = ovals;
	return len;
}
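/*
 * Example: with nr = 1000 samples and plist = { 50.0, 99.0 }, the loop
 * above walks the buckets accumulating "sum"; the 50th percentile is the
 * value of the first bucket at which sum >= 500, the 99th the first at
 * which sum >= 990. ovals[] then holds one latency value per requested
 * percentile, and *minv/*maxv bracket the returned values.
 */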
/*
 * Find and display the p-th percentile of clat
 */
static void show_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
				  fio_fp64_t *plist, unsigned int precision)
{
	unsigned int len, j = 0, minv, maxv;
	unsigned int *ovals = NULL;
	int is_last, per_line, scale_down;
	char fmt[32];

	len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
	if (!len)
		goto out;

	/*
	 * We default to usecs, but if the value range is such that we
	 * should scale down to msecs, do that.
	 */
	if (minv > 2000 && maxv > 99999) {
		scale_down = 1;
		log_info("    clat percentiles (msec):\n     |");
	} else {
		scale_down = 0;
		log_info("    clat percentiles (usec):\n     |");
	}

	snprintf(fmt, sizeof(fmt), "%%1.%uf", precision);
	per_line = (80 - 7) / (precision + 14);

	for (j = 0; j < len; j++) {
		char fbuf[16], *ptr = fbuf;

		/* for formatting */
		if (j != 0 && (j % per_line) == 0)
			log_info("     |");

		/* end of the list */
		is_last = (j == len - 1);

		if (plist[j].u.f < 10.0)
			ptr += sprintf(fbuf, " ");

		/* only the space left after the leading pad is usable */
		snprintf(ptr, sizeof(fbuf) - (ptr - fbuf), fmt, plist[j].u.f);

		if (scale_down)
			ovals[j] = (ovals[j] + 999) / 1000;

		log_info(" %sth=[%5u]%c", fbuf, ovals[j], is_last ? '\n' : ',');

		if (is_last)
			break;

		if ((j % per_line) == per_line - 1)	/* for formatting */
			log_info("\n");
	}

out:
	if (ovals)
		free(ovals);
}
int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
	     double *mean, double *dev)
{
	double n = (double) is->samples;

	if (n == 0)
		return 0;

	*min = is->min_val;
	*max = is->max_val;
	*mean = is->mean.u.f;

	if (n > 1.0)
		*dev = sqrt(is->S.u.f / (n - 1.0));
	else
		*dev = 0;

	return 1;
}
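/*
 * Note: S accumulates the sum of squared deviations from the running mean
 * (see add_stat_sample() and sum_stat() below), so the expression above is
 * the Bessel-corrected sample standard deviation, sqrt(S / (n - 1)).
 */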
void show_group_stats(struct group_run_stats *rs)
{
	char *p1, *p2, *p3, *p4;
	const char *ddir_str[] = { "   READ", "  WRITE", "   TRIM" };
	int i;

	log_info("\nRun status group %d (all jobs):\n", rs->groupid);

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		const int i2p = is_power_of_2(rs->kb_base);

		if (!rs->max_run[i])
			continue;

		p1 = num2str(rs->io_kb[i], 6, rs->kb_base, i2p, 8);
		p2 = num2str(rs->agg[i], 6, rs->kb_base, i2p, rs->unit_base);
		p3 = num2str(rs->min_bw[i], 6, rs->kb_base, i2p, rs->unit_base);
		p4 = num2str(rs->max_bw[i], 6, rs->kb_base, i2p, rs->unit_base);

		log_info("%s: io=%s, aggrb=%s/s, minb=%s/s, maxb=%s/s,"
			 " mint=%llumsec, maxt=%llumsec\n",
				rs->unified_rw_rep ? "  MIXED" : ddir_str[i],
				p1, p2, p3, p4,
				(unsigned long long) rs->min_run[i],
				(unsigned long long) rs->max_run[i]);

		free(p1);
		free(p2);
		free(p3);
		free(p4);
	}
}
void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist)
{
	int i;

	/*
	 * Do depth distribution calculations
	 */
	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
		if (total) {
			io_u_dist[i] = (double) map[i] / (double) total;
			io_u_dist[i] *= 100.0;
			if (io_u_dist[i] < 0.1 && map[i])
				io_u_dist[i] = 0.1;
		} else
			io_u_dist[i] = 0.0;
	}
}
static void stat_calc_lat(struct thread_stat *ts, double *dst,
			  unsigned int *src, int nr)
{
	unsigned long total = ddir_rw_sum(ts->total_io_u);
	int i;

	/*
	 * Do latency distribution calculations
	 */
	for (i = 0; i < nr; i++) {
		if (total) {
			dst[i] = (double) src[i] / (double) total;
			dst[i] *= 100.0;
			if (dst[i] < 0.01 && src[i])
				dst[i] = 0.01;
		} else
			dst[i] = 0.0;
	}
}

void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
}

void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
}
static void display_lat(const char *name, unsigned long min, unsigned long max,
			double mean, double dev)
{
	const char *base = "(usec)";
	char *minp, *maxp;

	if (!usec_to_msec(&min, &max, &mean, &dev))
		base = "(msec)";

	minp = num2str(min, 6, 1, 0, 0);
	maxp = num2str(max, 6, 1, 0, 0);

	log_info("    %s %s: min=%s, max=%s, avg=%5.02f,"
		 " stdev=%5.02f\n", name, base, minp, maxp, mean, dev);

	free(minp);
	free(maxp);
}
static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
			     int ddir)
{
	const char *ddir_str[] = { "read ", "write", "trim" };
	unsigned long min, max, runt;
	unsigned long long bw, iops;
	double mean, dev;
	char *io_p, *bw_p, *iops_p;
	int i2p;

	assert(ddir_rw(ddir));

	if (!ts->runtime[ddir])
		return;

	i2p = is_power_of_2(rs->kb_base);
	runt = ts->runtime[ddir];

	bw = (1000 * ts->io_bytes[ddir]) / runt;
	io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p, 8);
	bw_p = num2str(bw, 6, 1, i2p, ts->unit_base);

	iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
	iops_p = num2str(iops, 6, 1, 0, 0);

	log_info("  %s: io=%s, bw=%s/s, iops=%s, runt=%6llumsec\n",
			rs->unified_rw_rep ? "mixed" : ddir_str[ddir],
			io_p, bw_p, iops_p,
			(unsigned long long) ts->runtime[ddir]);

	free(io_p);
	free(bw_p);
	free(iops_p);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		display_lat("slat", min, max, mean, dev);
	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		display_lat("clat", min, max, mean, dev);
	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
		display_lat(" lat", min, max, mean, dev);

	if (ts->clat_percentiles) {
		show_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list,
					ts->percentile_precision);
	}
	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg = 100.0, fkb_base = (double)rs->kb_base;
		const char *bw_str = (rs->unit_base == 1 ? "Kbit" : "KB");

		if (rs->unit_base == 1) {
			min *= 8.0;
			max *= 8.0;
			mean *= 8.0;
			dev *= 8.0;
		}

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		if (mean > fkb_base * fkb_base) {
			min /= fkb_base;
			max /= fkb_base;
			mean /= fkb_base;
			dev /= fkb_base;
			bw_str = (rs->unit_base == 1 ? "Mbit" : "MB");
		}

		log_info("    bw (%-4s/s): min=%5lu, max=%5lu, per=%3.2f%%,"
			 " avg=%5.02f, stdev=%5.02f\n", bw_str, min, max,
			 p_of_agg, mean, dev);
	}
}
static int show_lat(double *io_u_lat, int nr, const char **ranges,
		    const char *msg)
{
	int new_line = 1, i, line = 0, shown = 0;

	for (i = 0; i < nr; i++) {
		if (io_u_lat[i] <= 0.0)
			continue;
		shown = 1;
		if (new_line) {
			if (line)
				log_info("\n");
			log_info("    lat (%s) : ", msg);
			new_line = 0;
			line = 0;
		}
		if (line)
			log_info(", ");
		log_info("%s%3.2f%%", ranges[i], io_u_lat[i]);
		line++;
		if (line == 5)
			new_line = 1;
	}

	if (shown)
		log_info("\n");

	return shown;
}

static void show_lat_u(double *io_u_lat_u)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", };

	show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec");
}

static void show_lat_m(double *io_u_lat_m)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", "2000=",
				 ">=2000=", };

	show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec");
}
static void show_latencies(struct thread_stat *ts)
{
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];

	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	show_lat_u(io_u_lat_u);
	show_lat_m(io_u_lat_m);
}
static void show_thread_status_normal(struct thread_stat *ts,
				      struct group_run_stats *rs)
{
	double usr_cpu, sys_cpu;
	unsigned long runtime;
	double io_u_dist[FIO_IO_U_MAP_NR];
	time_t time_p;
	char time_buf[64];

	if (!(ts->io_bytes[DDIR_READ] + ts->io_bytes[DDIR_WRITE] +
	    ts->io_bytes[DDIR_TRIM]) && !(ts->total_io_u[DDIR_READ] +
	    ts->total_io_u[DDIR_WRITE] + ts->total_io_u[DDIR_TRIM]))
		return;

	time(&time_p);
	os_ctime_r((const time_t *) &time_p, time_buf, sizeof(time_buf));

	if (!ts->error) {
		log_info("%s: (groupid=%d, jobs=%d): err=%2d: pid=%d: %s",
					ts->name, ts->groupid, ts->members,
					ts->error, (int) ts->pid, time_buf);
	} else {
		log_info("%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d: %s",
					ts->name, ts->groupid, ts->members,
					ts->error, ts->verror, (int) ts->pid,
					time_buf);
	}

	if (strlen(ts->description))
		log_info("  Description  : [%s]\n", ts->description);

	if (ts->io_bytes[DDIR_READ])
		show_ddir_status(rs, ts, DDIR_READ);
	if (ts->io_bytes[DDIR_WRITE])
		show_ddir_status(rs, ts, DDIR_WRITE);
	if (ts->io_bytes[DDIR_TRIM])
		show_ddir_status(rs, ts, DDIR_TRIM);

	show_latencies(ts);

	runtime = ts->total_run_time;
	if (runtime) {
		double runt = (double) runtime;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info("  cpu          : usr=%3.2f%%, sys=%3.2f%%, ctx=%llu,"
		 " majf=%llu, minf=%llu\n", usr_cpu, sys_cpu,
			(unsigned long long) ts->ctx,
			(unsigned long long) ts->majf,
			(unsigned long long) ts->minf);

	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	log_info("  IO depths    : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
		 " 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
					io_u_dist[1], io_u_dist[2],
					io_u_dist[3], io_u_dist[4],
					io_u_dist[5], io_u_dist[6]);

	stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
	log_info("     submit    : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
					io_u_dist[1], io_u_dist[2],
					io_u_dist[3], io_u_dist[4],
					io_u_dist[5], io_u_dist[6]);
	stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
	log_info("     complete  : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
					io_u_dist[1], io_u_dist[2],
					io_u_dist[3], io_u_dist[4],
					io_u_dist[5], io_u_dist[6]);
	log_info("     issued    : total=r=%llu/w=%llu/d=%llu,"
				 " short=r=%llu/w=%llu/d=%llu\n",
					(unsigned long long) ts->total_io_u[0],
					(unsigned long long) ts->total_io_u[1],
					(unsigned long long) ts->total_io_u[2],
					(unsigned long long) ts->short_io_u[0],
					(unsigned long long) ts->short_io_u[1],
					(unsigned long long) ts->short_io_u[2]);
	if (ts->continue_on_error) {
		log_info("     errors    : total=%llu, first_error=%d/<%s>\n",
					(unsigned long long)ts->total_err_count,
					ts->first_error,
					strerror(ts->first_error));
	}
	if (ts->latency_depth) {
		log_info("     latency   : target=%llu, window=%llu, percentile=%.2f%%, depth=%u\n",
					(unsigned long long)ts->latency_target,
					(unsigned long long)ts->latency_window,
					ts->latency_percentile.u.f,
					ts->latency_depth);
	}
}
static void show_ddir_status_terse(struct thread_stat *ts,
				   struct group_run_stats *rs, int ddir)
{
	unsigned long min, max;
	unsigned long long bw, iops;
	unsigned int *ovals = NULL;
	double mean, dev;
	unsigned int len, minv, maxv;
	int i;

	assert(ddir_rw(ddir));

	iops = bw = 0;
	if (ts->runtime[ddir]) {
		uint64_t runt = ts->runtime[ddir];

		bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024;
		iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
	}

	log_info(";%llu;%llu;%llu;%llu",
		(unsigned long long) ts->io_bytes[ddir] >> 10, bw, iops,
		(unsigned long long) ts->runtime[ddir]);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (ts->clat_percentiles) {
		len = calc_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list, &ovals, &maxv,
					&minv);
	} else
		len = 0;

	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
		if (i >= len) {
			log_info(";0%%=0");
			continue;
		}
		log_info(";%f%%=%u", ts->percentile_list[i].u.f, ovals[i]);
	}

	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (ovals)
		free(ovals);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg = 100.0;

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		log_info(";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
	} else
		log_info(";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}
static void add_ddir_status_json(struct thread_stat *ts,
		struct group_run_stats *rs, int ddir, struct json_object *parent)
{
	unsigned long min, max;
	unsigned long long bw, iops;
	unsigned int *ovals = NULL;
	double mean, dev;
	unsigned int len, minv, maxv;
	int i;
	const char *ddirname[] = {"read", "write", "trim"};
	struct json_object *dir_object, *tmp_object, *percentile_object;
	char buf[120];
	double p_of_agg = 100.0;

	assert(ddir_rw(ddir));

	if (ts->unified_rw_rep && ddir != DDIR_READ)
		return;

	dir_object = json_create_object();
	json_object_add_value_object(parent,
		ts->unified_rw_rep ? "mixed" : ddirname[ddir], dir_object);

	iops = bw = 0;
	if (ts->runtime[ddir]) {
		uint64_t runt = ts->runtime[ddir];

		bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024;
		iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
	}

	json_object_add_value_int(dir_object, "io_bytes", ts->io_bytes[ddir] >> 10);
	json_object_add_value_int(dir_object, "bw", bw);
	json_object_add_value_int(dir_object, "iops", iops);
	json_object_add_value_int(dir_object, "runtime", ts->runtime[ddir]);

	if (!calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "slat", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);

	if (!calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "clat", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);

	if (ts->clat_percentiles) {
		len = calc_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list, &ovals, &maxv,
					&minv);
	} else
		len = 0;

	percentile_object = json_create_object();
	json_object_add_value_object(tmp_object, "percentile", percentile_object);
	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
		if (i >= len) {
			json_object_add_value_int(percentile_object, "0.00", 0);
			continue;
		}
		snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f);
		json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]);
	}

	if (!calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "lat", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);

	if (ovals)
		free(ovals);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}
	} else {
		min = max = 0;
		p_of_agg = mean = dev = 0.0;
	}
	json_object_add_value_int(dir_object, "bw_min", min);
	json_object_add_value_int(dir_object, "bw_max", max);
	json_object_add_value_float(dir_object, "bw_agg", p_of_agg);
	json_object_add_value_float(dir_object, "bw_mean", mean);
	json_object_add_value_float(dir_object, "bw_dev", dev);
}
static void show_thread_status_terse_v2(struct thread_stat *ts,
					struct group_run_stats *rs)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	/* General Info */
	log_info("2;%s;%d;%d", ts->name, ts->groupid, ts->error);
	/* Log Read Status */
	show_ddir_status_terse(ts, rs, DDIR_READ);
	/* Log Write Status */
	show_ddir_status_terse(ts, rs, DDIR_WRITE);
	/* Log Trim Status */
	show_ddir_status_terse(ts, rs, DDIR_TRIM);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info(";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
		 (unsigned long long) ts->ctx,
		 (unsigned long long) ts->majf,
		 (unsigned long long) ts->minf);

	/* Calc % distribution of IO depths, usecond, msecond latency */
	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Only show fixed 7 I/O depth levels */
	log_info(";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
		 io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
		 io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		log_info(";%3.2f%%", io_u_lat_u[i]);
	/* Millisecond latency */
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		log_info(";%3.2f%%", io_u_lat_m[i]);
	/* Additional output if continue_on_error set - default off */
	if (ts->continue_on_error)
		log_info(";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);
	log_info("\n");

	/* Additional output if description is set */
	if (strlen(ts->description))
		log_info(";%s", ts->description);

	log_info("\n");
}
static void show_thread_status_terse_v3_v4(struct thread_stat *ts,
					   struct group_run_stats *rs, int ver)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	/* General Info */
	log_info("%d;%s;%s;%d;%d", ver, fio_version_string,
		 ts->name, ts->groupid, ts->error);
	/* Log Read Status */
	show_ddir_status_terse(ts, rs, DDIR_READ);
	/* Log Write Status */
	show_ddir_status_terse(ts, rs, DDIR_WRITE);
	/* Log Trim Status */
	if (ver == 4)
		show_ddir_status_terse(ts, rs, DDIR_TRIM);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info(";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
		 (unsigned long long) ts->ctx,
		 (unsigned long long) ts->majf,
		 (unsigned long long) ts->minf);

	/* Calc % distribution of IO depths, usecond, msecond latency */
	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Only show fixed 7 I/O depth levels */
	log_info(";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
		 io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
		 io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		log_info(";%3.2f%%", io_u_lat_u[i]);
	/* Millisecond latency */
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		log_info(";%3.2f%%", io_u_lat_m[i]);

	/* disk util stats, if any */
	show_disk_util(1, NULL);

	/* Additional output if continue_on_error set - default off */
	if (ts->continue_on_error)
		log_info(";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);

	/* Additional output if description is set */
	if (strlen(ts->description))
		log_info(";%s", ts->description);

	log_info("\n");
}
static struct json_object *show_thread_status_json(struct thread_stat *ts,
						   struct group_run_stats *rs)
{
	struct json_object *root, *tmp;
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	root = json_create_object();
	json_object_add_value_string(root, "jobname", ts->name);
	json_object_add_value_int(root, "groupid", ts->groupid);
	json_object_add_value_int(root, "error", ts->error);

	add_ddir_status_json(ts, rs, DDIR_READ, root);
	add_ddir_status_json(ts, rs, DDIR_WRITE, root);
	add_ddir_status_json(ts, rs, DDIR_TRIM, root);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}
	json_object_add_value_float(root, "usr_cpu", usr_cpu);
	json_object_add_value_float(root, "sys_cpu", sys_cpu);
	json_object_add_value_int(root, "ctx", ts->ctx);
	json_object_add_value_int(root, "majf", ts->majf);
	json_object_add_value_int(root, "minf", ts->minf);

	/* Calc % distribution of IO depths, usecond, msecond latency */
	stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	tmp = json_create_object();
	json_object_add_value_object(root, "iodepth_level", tmp);
	/* Only show fixed 7 I/O depth levels */
	for (i = 0; i < 7; i++) {
		char name[20];

		if (i < 6)
			snprintf(name, 20, "%d", 1 << i);
		else
			snprintf(name, 20, ">=%d", 1 << i);
		json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
	}

	tmp = json_create_object();
	json_object_add_value_object(root, "latency_us", tmp);
	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
		const char *ranges[] = { "2", "4", "10", "20", "50", "100",
					 "250", "500", "750", "1000", };
		json_object_add_value_float(tmp, ranges[i], io_u_lat_u[i]);
	}
	/* Millisecond latency */
	tmp = json_create_object();
	json_object_add_value_object(root, "latency_ms", tmp);
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++) {
		const char *ranges[] = { "2", "4", "10", "20", "50", "100",
					 "250", "500", "750", "1000", "2000",
					 ">=2000", };
		json_object_add_value_float(tmp, ranges[i], io_u_lat_m[i]);
	}

	/* Additional output if continue_on_error set - default off */
	if (ts->continue_on_error) {
		json_object_add_value_int(root, "total_err", ts->total_err_count);
		json_object_add_value_int(root, "first_error", ts->first_error);
	}

	if (ts->latency_depth) {
		json_object_add_value_int(root, "latency_depth", ts->latency_depth);
		json_object_add_value_int(root, "latency_target", ts->latency_target);
		json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
		json_object_add_value_int(root, "latency_window", ts->latency_window);
	}

	/* Additional output if description is set */
	if (strlen(ts->description))
		json_object_add_value_string(root, "desc", ts->description);

	return root;
}
static void show_thread_status_terse(struct thread_stat *ts,
				     struct group_run_stats *rs)
{
	if (terse_version == 2)
		show_thread_status_terse_v2(ts, rs);
	else if (terse_version == 3 || terse_version == 4)
		show_thread_status_terse_v3_v4(ts, rs, terse_version);
	else
		log_err("fio: bad terse version!? %d\n", terse_version);
}

struct json_object *show_thread_status(struct thread_stat *ts,
				       struct group_run_stats *rs)
{
	if (output_format == FIO_OUTPUT_TERSE)
		show_thread_status_terse(ts, rs);
	else if (output_format == FIO_OUTPUT_JSON)
		return show_thread_status_json(ts, rs);
	else
		show_thread_status_normal(ts, rs);

	return NULL;
}
static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
	double mean, S;

	if (src->samples == 0)
		return;

	dst->min_val = min(dst->min_val, src->min_val);
	dst->max_val = max(dst->max_val, src->max_val);

	/*
	 * Compute new mean and S after the merge
	 * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
	 *  #Parallel_algorithm>
	 */
	if (nr == 1) {
		mean = src->mean.u.f;
		S = src->S.u.f;
	} else {
		double delta = src->mean.u.f - dst->mean.u.f;

		mean = ((src->mean.u.f * src->samples) +
			(dst->mean.u.f * dst->samples)) /
			(dst->samples + src->samples);

		S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
			(dst->samples * src->samples) /
			(dst->samples + src->samples);
	}

	dst->samples += src->samples;
	dst->mean.u.f = mean;
	dst->S.u.f = S;
}
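/*
 * The parallel merge above combines two sample sets a and b as:
 *
 *	mean = (n_a * mean_a + n_b * mean_b) / (n_a + n_b)
 *	S = S_a + S_b + delta^2 * n_a * n_b / (n_a + n_b)
 *
 * where delta = mean_b - mean_a, so per-job statistics can be folded into
 * group statistics without revisiting the individual samples.
 */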
void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (dst->max_run[i] < src->max_run[i])
			dst->max_run[i] = src->max_run[i];
		if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
			dst->min_run[i] = src->min_run[i];
		if (dst->max_bw[i] < src->max_bw[i])
			dst->max_bw[i] = src->max_bw[i];
		if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
			dst->min_bw[i] = src->min_bw[i];

		dst->io_kb[i] += src->io_kb[i];
		dst->agg[i] += src->agg[i];
	}
}
void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, int nr)
{
	int l, k;

	for (l = 0; l < DDIR_RWDIR_CNT; l++) {
		if (!dst->unified_rw_rep) {
			sum_stat(&dst->clat_stat[l], &src->clat_stat[l], nr);
			sum_stat(&dst->slat_stat[l], &src->slat_stat[l], nr);
			sum_stat(&dst->lat_stat[l], &src->lat_stat[l], nr);
			sum_stat(&dst->bw_stat[l], &src->bw_stat[l], nr);

			dst->io_bytes[l] += src->io_bytes[l];

			if (dst->runtime[l] < src->runtime[l])
				dst->runtime[l] = src->runtime[l];
		} else {
			sum_stat(&dst->clat_stat[0], &src->clat_stat[l], nr);
			sum_stat(&dst->slat_stat[0], &src->slat_stat[l], nr);
			sum_stat(&dst->lat_stat[0], &src->lat_stat[l], nr);
			sum_stat(&dst->bw_stat[0], &src->bw_stat[l], nr);

			dst->io_bytes[0] += src->io_bytes[l];

			if (dst->runtime[0] < src->runtime[l])
				dst->runtime[0] = src->runtime[l];
		}
	}

	dst->usr_time += src->usr_time;
	dst->sys_time += src->sys_time;
	dst->ctx += src->ctx;
	dst->majf += src->majf;
	dst->minf += src->minf;

	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_map[k] += src->io_u_map[k];
	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_submit[k] += src->io_u_submit[k];
	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_complete[k] += src->io_u_complete[k];
	for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
		dst->io_u_lat_u[k] += src->io_u_lat_u[k];
	for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
		dst->io_u_lat_m[k] += src->io_u_lat_m[k];

	for (k = 0; k < DDIR_RWDIR_CNT; k++) {
		if (!dst->unified_rw_rep) {
			dst->total_io_u[k] += src->total_io_u[k];
			dst->short_io_u[k] += src->short_io_u[k];
		} else {
			dst->total_io_u[0] += src->total_io_u[k];
			dst->short_io_u[0] += src->short_io_u[k];
		}
	}

	for (k = 0; k < DDIR_RWDIR_CNT; k++) {
		int m;

		for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
			if (!dst->unified_rw_rep)
				dst->io_u_plat[k][m] += src->io_u_plat[k][m];
			else
				dst->io_u_plat[0][m] += src->io_u_plat[k][m];
		}
	}

	dst->total_run_time += src->total_run_time;
	dst->total_submit += src->total_submit;
	dst->total_complete += src->total_complete;
}
void init_group_run_stat(struct group_run_stats *gs)
{
	int i;

	memset(gs, 0, sizeof(*gs));

	for (i = 0; i < DDIR_RWDIR_CNT; i++)
		gs->min_bw[i] = gs->min_run[i] = ~0UL;
}

void init_thread_stat(struct thread_stat *ts)
{
	int j;

	memset(ts, 0, sizeof(*ts));

	for (j = 0; j < DDIR_RWDIR_CNT; j++) {
		ts->lat_stat[j].min_val = -1UL;
		ts->clat_stat[j].min_val = -1UL;
		ts->slat_stat[j].min_val = -1UL;
		ts->bw_stat[j].min_val = -1UL;
	}
	ts->groupid = -1;
}
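/*
 * The all-ones initial values (~0UL / -1UL) act as "no sample yet"
 * sentinels: any real minimum recorded later compares smaller and
 * replaces them.
 */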
void __show_run_stats(void)
{
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;
	struct thread_stat *threadstats, *ts;
	int i, j, nr_ts, last_ts, idx;
	int kb_base_warned = 0;
	int unit_base_warned = 0;
	struct json_object *root = NULL;
	struct json_array *array = NULL;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++)
		init_group_run_stat(&runstats[i]);
	/*
	 * find out how many threads stats we need. if group reporting isn't
	 * enabled, it's one-per-td.
	 */
	nr_ts = 0;
	last_ts = -1;
	for_each_td(td, i) {
		if (!td->o.group_reporting) {
			nr_ts++;
			continue;
		}
		if (last_ts == td->groupid)
			continue;

		last_ts = td->groupid;
		nr_ts++;
	}

	threadstats = malloc(nr_ts * sizeof(struct thread_stat));

	for (i = 0; i < nr_ts; i++)
		init_thread_stat(&threadstats[i]);
	j = 0;
	last_ts = -1;
	idx = 0;
	for_each_td(td, i) {
		if (idx && (!td->o.group_reporting ||
		    (td->o.group_reporting && last_ts != td->groupid))) {
			idx = 0;
			j++;
		}

		last_ts = td->groupid;

		ts = &threadstats[j];

		ts->clat_percentiles = td->o.clat_percentiles;
		ts->percentile_precision = td->o.percentile_precision;
		memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));

		idx++;
		ts->members++;

		if (ts->groupid == -1) {
			/*
			 * These are per-group shared already
			 */
			strncpy(ts->name, td->o.name, FIO_JOBNAME_SIZE - 1);
			if (td->o.description)
				strncpy(ts->description, td->o.description,
						FIO_JOBDESC_SIZE - 1);
			else
				memset(ts->description, 0, FIO_JOBDESC_SIZE);

			/*
			 * If multiple entries in this group, this is
			 * the first member.
			 */
			ts->thread_number = td->thread_number;
			ts->groupid = td->groupid;

			/*
			 * first pid in group, not very useful...
			 */
			ts->pid = td->pid;

			ts->kb_base = td->o.kb_base;
			ts->unit_base = td->o.unit_base;
			ts->unified_rw_rep = td->o.unified_rw_rep;
		} else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
			log_info("fio: kb_base differs for jobs in group, using"
				 " %u as the base\n", ts->kb_base);
			kb_base_warned = 1;
		} else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
			log_info("fio: unit_base differs for jobs in group, using"
				 " %u as the base\n", ts->unit_base);
			unit_base_warned = 1;
		}

		ts->continue_on_error = td->o.continue_on_error;
		ts->total_err_count += td->total_err_count;
		ts->first_error = td->first_error;
		if (!ts->error) {
			if (!td->error && td->o.continue_on_error &&
			    td->first_error) {
				ts->error = td->first_error;
				ts->verror[sizeof(ts->verror) - 1] = '\0';
				strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
			} else if (td->error) {
				ts->error = td->error;
				ts->verror[sizeof(ts->verror) - 1] = '\0';
				strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
			}
		}

		ts->latency_depth = td->latency_qd;
		ts->latency_target = td->o.latency_target;
		ts->latency_percentile = td->o.latency_percentile;
		ts->latency_window = td->o.latency_window;

		sum_thread_stats(ts, &td->ts, idx);
	}
	for (i = 0; i < nr_ts; i++) {
		unsigned long long bw;

		ts = &threadstats[i];
		rs = &runstats[ts->groupid];
		rs->kb_base = ts->kb_base;
		rs->unit_base = ts->unit_base;
		rs->unified_rw_rep += ts->unified_rw_rep;

		for (j = 0; j < DDIR_RWDIR_CNT; j++) {
			if (!ts->runtime[j])
				continue;
			if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
				rs->min_run[j] = ts->runtime[j];
			if (ts->runtime[j] > rs->max_run[j])
				rs->max_run[j] = ts->runtime[j];

			bw = 0;
			if (ts->runtime[j]) {
				unsigned long runt = ts->runtime[j];
				unsigned long long kb;

				kb = ts->io_bytes[j] / rs->kb_base;
				bw = kb * 1000 / runt;
			}
			if (bw < rs->min_bw[j])
				rs->min_bw[j] = bw;
			if (bw > rs->max_bw[j])
				rs->max_bw[j] = bw;

			rs->io_kb[j] += ts->io_bytes[j] / rs->kb_base;
		}
	}
	for (i = 0; i < groupid + 1; i++) {
		int ddir;

		rs = &runstats[i];

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			if (rs->max_run[ddir])
				rs->agg[ddir] = (rs->io_kb[ddir] * 1000) /
						rs->max_run[ddir];
		}
	}
	/*
	 * don't overwrite last signal output
	 */
	if (output_format == FIO_OUTPUT_NORMAL)
		log_info("\n");
	else if (output_format == FIO_OUTPUT_JSON) {
		root = json_create_object();
		json_object_add_value_string(root, "fio version", fio_version_string);
		array = json_create_array();
		json_object_add_value_array(root, "jobs", array);
	}
	for (i = 0; i < nr_ts; i++) {
		ts = &threadstats[i];
		rs = &runstats[ts->groupid];

		if (is_backend)
			fio_server_send_ts(ts, rs);
		else if (output_format == FIO_OUTPUT_TERSE)
			show_thread_status_terse(ts, rs);
		else if (output_format == FIO_OUTPUT_JSON) {
			struct json_object *tmp = show_thread_status_json(ts, rs);

			json_array_add_value_object(array, tmp);
		} else
			show_thread_status_normal(ts, rs);
	}
	if (output_format == FIO_OUTPUT_JSON) {
		/* disk util stats, if any */
		show_disk_util(1, root);

		show_idle_prof_stats(FIO_OUTPUT_JSON, root);

		json_print_object(root);
		log_info("\n");
		json_free_object(root);
	}
	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		rs->groupid = i;
		if (is_backend)
			fio_server_send_gs(rs);
		else if (output_format == FIO_OUTPUT_NORMAL)
			show_group_stats(rs);
	}

	if (is_backend)
		fio_server_send_du();
	else if (output_format == FIO_OUTPUT_NORMAL) {
		show_disk_util(0, NULL);
		show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL);
	}
	if (output_format != FIO_OUTPUT_TERSE && append_terse_output) {
		log_info("\nAdditional Terse Output:\n");

		for (i = 0; i < nr_ts; i++) {
			ts = &threadstats[i];
			rs = &runstats[ts->groupid];
			show_thread_status_terse(ts, rs);
		}
	}

	free(runstats);
	free(threadstats);
}
void show_run_stats(void)
{
	fio_mutex_down(stat_mutex);
	__show_run_stats();
	fio_mutex_up(stat_mutex);
}
static void *__show_running_run_stats(void *arg)
{
	struct thread_data *td;
	unsigned long long *rt;
	struct timeval tv;
	int i;

	fio_mutex_down(stat_mutex);

	rt = malloc(thread_number * sizeof(unsigned long long));
	fio_gettime(&tv, NULL);

	for_each_td(td, i) {
		rt[i] = mtime_since(&td->start, &tv);
		if (td_read(td) && td->io_bytes[DDIR_READ])
			td->ts.runtime[DDIR_READ] += rt[i];
		if (td_write(td) && td->io_bytes[DDIR_WRITE])
			td->ts.runtime[DDIR_WRITE] += rt[i];
		if (td_trim(td) && td->io_bytes[DDIR_TRIM])
			td->ts.runtime[DDIR_TRIM] += rt[i];

		td->update_rusage = 1;
		td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
		td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
		td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
		td->ts.total_run_time = mtime_since(&td->epoch, &tv);
	}

	for_each_td(td, i) {
		if (td->rusage_sem) {
			td->update_rusage = 1;
			fio_mutex_down(td->rusage_sem);
		}
		td->update_rusage = 0;
	}

	__show_run_stats();

	for_each_td(td, i) {
		if (td_read(td) && td->io_bytes[DDIR_READ])
			td->ts.runtime[DDIR_READ] -= rt[i];
		if (td_write(td) && td->io_bytes[DDIR_WRITE])
			td->ts.runtime[DDIR_WRITE] -= rt[i];
		if (td_trim(td) && td->io_bytes[DDIR_TRIM])
			td->ts.runtime[DDIR_TRIM] -= rt[i];
	}

	free(rt);
	fio_mutex_up(stat_mutex);
	free(arg);
	return NULL;
}
/*
 * Called from signal handler. It _should_ be safe to just run this inline
 * in the sig handler, but we disturb the system less by creating a thread
 * to do it.
 */
void show_running_run_stats(void)
{
	pthread_t *thread;

	thread = calloc(1, sizeof(*thread));
	if (!thread)
		return;

	if (!pthread_create(thread, NULL, __show_running_run_stats, thread)) {
		int err;

		err = pthread_detach(*thread);
		if (err)
			log_err("fio: DU thread detach failed: %s\n", strerror(err));

		return;
	}

	free(thread);
}
static int status_interval_init;
static struct timeval status_time;
static int status_file_disabled;

#define FIO_STATUS_FILE		"fio-dump-status"

static int check_status_file(void)
{
	struct stat sb;
	const char *temp_dir;
	char fio_status_file_path[PATH_MAX];

	if (status_file_disabled)
		return 0;

	temp_dir = getenv("TMPDIR");
	if (temp_dir == NULL) {
		temp_dir = getenv("TEMP");
		if (temp_dir && strlen(temp_dir) >= PATH_MAX)
			temp_dir = NULL;
	}
	if (temp_dir == NULL)
		temp_dir = "/tmp";

	snprintf(fio_status_file_path, sizeof(fio_status_file_path), "%s/%s",
		 temp_dir, FIO_STATUS_FILE);

	if (stat(fio_status_file_path, &sb))
		return 0;

	if (unlink(fio_status_file_path) < 0) {
		log_err("fio: failed to unlink %s: %s\n", fio_status_file_path,
							strerror(errno));
		log_err("fio: disabling status file updates\n");
		status_file_disabled = 1;
	}

	return 1;
}
void check_for_running_stats(void)
{
	if (status_interval) {
		if (!status_interval_init) {
			fio_gettime(&status_time, NULL);
			status_interval_init = 1;
		} else if (mtime_since_now(&status_time) >= status_interval) {
			show_running_run_stats();
			fio_gettime(&status_time, NULL);
			return;
		}
	}
	if (check_status_file()) {
		show_running_run_stats();
		return;
	}
}
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
	double val = data;
	double delta;

	if (data > is->max_val)
		is->max_val = data;
	if (data < is->min_val)
		is->min_val = data;

	delta = val - is->mean.u.f;
	if (delta) {
		is->mean.u.f += delta / (is->samples + 1.0);
		is->S.u.f += delta * (val - is->mean.u.f);
	}

	is->samples++;
}
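/*
 * The update above is Welford's online algorithm: with each new sample x,
 *
 *	mean_n = mean_{n-1} + (x - mean_{n-1}) / n
 *	S_n = S_{n-1} + (x - mean_{n-1}) * (x - mean_n)
 *
 * so the mean and the squared-deviation sum S are maintained in a single
 * pass without storing the individual samples.
 */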
static void __add_log_sample(struct io_log *iolog, unsigned long val,
			     enum fio_ddir ddir, unsigned int bs,
			     unsigned long t, uint64_t offset)
{
	uint64_t nr_samples = iolog->nr_samples;
	struct io_sample *s;

	if (iolog->disabled)
		return;

	if (!iolog->nr_samples)
		iolog->avg_last = t;

	if (iolog->nr_samples == iolog->max_samples) {
		size_t new_size;
		void *new_log;

		new_size = 2 * iolog->max_samples * log_entry_sz(iolog);

		if (iolog->log_gz && (new_size > iolog->log_gz)) {
			if (iolog_flush(iolog, 0)) {
				log_err("fio: failed flushing iolog! Will stop logging.\n");
				iolog->disabled = 1;
				return;
			}
			nr_samples = iolog->nr_samples;
		} else {
			new_log = realloc(iolog->log, new_size);
			if (!new_log) {
				log_err("fio: failed extending iolog! Will stop logging.\n");
				iolog->disabled = 1;
				return;
			}
			iolog->log = new_log;
			iolog->max_samples <<= 1;
		}
	}

	s = get_sample(iolog, nr_samples);

	s->val = val;
	s->time = t;
	io_sample_set_ddir(iolog, s, ddir);
	s->bs = bs;

	if (iolog->log_offset) {
		struct io_sample_offset *so = (void *) s;

		so->offset = offset;
	}

	iolog->nr_samples++;
}
static inline void reset_io_stat(struct io_stat *ios)
{
	ios->max_val = ios->min_val = ios->samples = 0;
	ios->mean.u.f = ios->S.u.f = 0;
}
void reset_io_stats(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;
	int i, j;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		reset_io_stat(&ts->clat_stat[i]);
		reset_io_stat(&ts->slat_stat[i]);
		reset_io_stat(&ts->lat_stat[i]);
		reset_io_stat(&ts->bw_stat[i]);
		reset_io_stat(&ts->iops_stat[i]);

		ts->io_bytes[i] = 0;
		ts->runtime[i] = 0;

		for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
			ts->io_u_plat[i][j] = 0;
	}

	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
		ts->io_u_map[i] = 0;
		ts->io_u_submit[i] = 0;
		ts->io_u_complete[i] = 0;
	}

	/*
	 * The latency arrays are larger than FIO_IO_U_MAP_NR, so clear
	 * them over their own bounds.
	 */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		ts->io_u_lat_u[i] = 0;
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		ts->io_u_lat_m[i] = 0;

	ts->total_submit = 0;
	ts->total_complete = 0;

	for (i = 0; i < 3; i++) {
		ts->total_io_u[i] = 0;
		ts->short_io_u[i] = 0;
	}
}
static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed)
{
	/*
	 * Note an entry in the log. Use the mean from the logged samples,
	 * making sure to properly round up. Only write a log entry if we
	 * had actual samples done.
	 */
	if (iolog->avg_window[DDIR_READ].samples) {
		unsigned long mr;

		mr = iolog->avg_window[DDIR_READ].mean.u.f + 0.50;
		__add_log_sample(iolog, mr, DDIR_READ, 0, elapsed, 0);
	}
	if (iolog->avg_window[DDIR_WRITE].samples) {
		unsigned long mw;

		mw = iolog->avg_window[DDIR_WRITE].mean.u.f + 0.50;
		__add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed, 0);
	}
	if (iolog->avg_window[DDIR_TRIM].samples) {
		unsigned long mt;

		mt = iolog->avg_window[DDIR_TRIM].mean.u.f + 0.50;
		__add_log_sample(iolog, mt, DDIR_TRIM, 0, elapsed, 0);
	}

	reset_io_stat(&iolog->avg_window[DDIR_READ]);
	reset_io_stat(&iolog->avg_window[DDIR_WRITE]);
	reset_io_stat(&iolog->avg_window[DDIR_TRIM]);
}
static void add_log_sample(struct thread_data *td, struct io_log *iolog,
			   unsigned long val, enum fio_ddir ddir,
			   unsigned int bs, uint64_t offset)
{
	unsigned long elapsed, this_window;

	if (!ddir_rw(ddir))
		return;

	elapsed = mtime_since_now(&td->epoch);

	/*
	 * If no time averaging, just add the log sample.
	 */
	if (!iolog->avg_msec) {
		__add_log_sample(iolog, val, ddir, bs, elapsed, offset);
		return;
	}

	/*
	 * Add the sample. If the time period has passed, then
	 * add that entry to the log and clear.
	 */
	add_stat_sample(&iolog->avg_window[ddir], val);

	/*
	 * If period hasn't passed, adding the above sample is all we
	 * need to do.
	 */
	this_window = elapsed - iolog->avg_last;
	if (this_window < iolog->avg_msec)
		return;

	_add_stat_to_log(iolog, elapsed);

	iolog->avg_last = elapsed;
}
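/*
 * Example: with avg_msec = 500, every sample is folded into
 * avg_window[ddir]; once at least 500ms have passed since avg_last,
 * _add_stat_to_log() emits one entry per data direction holding the
 * window mean, and the windows are reset for the next period.
 */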
void finalize_logs(struct thread_data *td)
{
	unsigned long elapsed;

	elapsed = mtime_since_now(&td->epoch);

	if (td->clat_log)
		_add_stat_to_log(td->clat_log, elapsed);
	if (td->slat_log)
		_add_stat_to_log(td->slat_log, elapsed);
	if (td->lat_log)
		_add_stat_to_log(td->lat_log, elapsed);
	if (td->bw_log)
		_add_stat_to_log(td->bw_log, elapsed);
	if (td->iops_log)
		_add_stat_to_log(td->iops_log, elapsed);
}
void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs)
{
	struct io_log *iolog;

	if (!ddir_rw(ddir))
		return;

	iolog = agg_io_log[ddir];
	__add_log_sample(iolog, val, ddir, bs, mtime_since_genesis(), 0);
}
static void add_clat_percentile_sample(struct thread_stat *ts,
				       unsigned long usec, enum fio_ddir ddir)
{
	unsigned int idx = plat_val_to_idx(usec);

	assert(idx < FIO_IO_U_PLAT_NR);

	ts->io_u_plat[ddir][idx]++;
}
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long usec, unsigned int bs, uint64_t offset)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	add_stat_sample(&ts->clat_stat[ddir], usec);

	if (td->clat_log)
		add_log_sample(td, td->clat_log, usec, ddir, bs, offset);

	if (ts->clat_percentiles)
		add_clat_percentile_sample(ts, usec, ddir);
}
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long usec, unsigned int bs, uint64_t offset)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	add_stat_sample(&ts->slat_stat[ddir], usec);

	if (td->slat_log)
		add_log_sample(td, td->slat_log, usec, ddir, bs, offset);
}
void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
		    unsigned long usec, unsigned int bs, uint64_t offset)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	add_stat_sample(&ts->lat_stat[ddir], usec);

	if (td->lat_log)
		add_log_sample(td, td->lat_log, usec, ddir, bs, offset);
}
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs,
		   struct timeval *t)
{
	struct thread_stat *ts = &td->ts;
	unsigned long spent, rate;

	if (!ddir_rw(ddir))
		return;

	spent = mtime_since(&td->bw_sample_time, t);
	if (spent < td->o.bw_avg_time)
		return;

	/*
	 * Compute both read and write rates for the interval.
	 */
	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
		uint64_t delta;

		delta = td->this_io_bytes[ddir] - td->stat_io_bytes[ddir];
		if (!delta)
			continue; /* No entries for interval */

		rate = delta * 1000 / spent / 1024;
		add_stat_sample(&ts->bw_stat[ddir], rate);

		if (td->bw_log)
			add_log_sample(td, td->bw_log, rate, ddir, bs, 0);

		td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
	}

	fio_gettime(&td->bw_sample_time, NULL);
}
void add_iops_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs,
		     struct timeval *t)
{
	struct thread_stat *ts = &td->ts;
	unsigned long spent, iops;

	if (!ddir_rw(ddir))
		return;

	spent = mtime_since(&td->iops_sample_time, t);
	if (spent < td->o.iops_avg_time)
		return;

	/*
	 * Compute both read and write rates for the interval.
	 */
	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
		uint64_t delta;

		delta = td->this_io_blocks[ddir] - td->stat_io_blocks[ddir];
		if (!delta)
			continue; /* No entries for interval */

		iops = (delta * 1000) / spent;
		add_stat_sample(&ts->iops_stat[ddir], iops);

		if (td->iops_log)
			add_log_sample(td, td->iops_log, iops, ddir, bs, 0);

		td->stat_io_blocks[ddir] = td->this_io_blocks[ddir];
	}

	fio_gettime(&td->iops_sample_time, NULL);
}
void stat_init(void)
{
	stat_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
}

void stat_exit(void)
{
	/*
	 * When we have the mutex, we know out-of-band access to it
	 * has ended.
	 */
	fio_mutex_down(stat_mutex);
	fio_mutex_remove(stat_mutex);
}