#include "lib/ieee754.h"

void update_rusage_stat(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;

	getrusage(RUSAGE_SELF, &td->ru_end);

	ts->usr_time += mtime_since(&td->ru_start.ru_utime,
					&td->ru_end.ru_utime);
	ts->sys_time += mtime_since(&td->ru_start.ru_stime,
					&td->ru_end.ru_stime);
	ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
			- (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
	ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
	ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;

	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
}
/*
 * Given a latency, return the index of the corresponding bucket in
 * the structure tracking percentiles.
 *
 * (1) find the group (and error bits) that the value (latency)
 * belongs to by looking at its MSB. (2) find the bucket number in the
 * group by looking at the index bits.
 */
static unsigned int plat_val_to_idx(unsigned int val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Find MSB starting from bit 0 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val) * 8) - __builtin_clz(val) - 1;

	/*
	 * MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
	 * all bits of the sample as index.
	 */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return val;

	/* Compute the number of error bits to discard */
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* Compute the number of buckets before the group */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/*
	 * Discard the error bits and apply the mask to find the
	 * index for the buckets in the group
	 */
	offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

	/* Make sure the index does not exceed (array size - 1) */
	idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
		(base + offset) : (FIO_IO_U_PLAT_NR - 1);

	return idx;
}
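
/*
 * Worked example (illustrative only; assumes the stock bucket layout of
 * FIO_IO_U_PLAT_BITS == 6, i.e. FIO_IO_U_PLAT_VAL == 64 -- the numbers
 * change if those constants do):
 *
 *   val = 1500 usec -> msb = 10, error_bits = 10 - 6 = 4,
 *   base = (4 + 1) << 6 = 320, offset = 63 & (1500 >> 4) = 29,
 *   idx = 320 + 29 = 349.
 *
 * Every latency in [1488, 1504) lands in bucket 349; the bucket width,
 * and therefore the rounding error, grows with the magnitude of the
 * sample while the number of buckets per group stays fixed.
 */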
/*
 * Convert the given index of the bucket array to the value
 * represented by the bucket
 */
static unsigned int plat_idx_to_val(unsigned int idx)
{
	unsigned int error_bits, k, base;

	assert(idx < FIO_IO_U_PLAT_NR);

	/* MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
	 * all bits of the sample as index */
	if (idx < (FIO_IO_U_PLAT_VAL << 1))
		return idx;

	/* Find the group and compute the minimum value of that group */
	error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
	base = 1 << (error_bits + FIO_IO_U_PLAT_BITS);

	/* Find the bucket number within the group */
	k = idx % FIO_IO_U_PLAT_VAL;

	/* Return the mean of the range of the bucket */
	return base + ((k + 0.5) * (1 << error_bits));
}
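
/*
 * Round-trip of the example above (same FIO_IO_U_PLAT_BITS == 6
 * assumption): plat_idx_to_val(349) = 1024 + (29 + 0.5) * 16 = 1496,
 * the midpoint of the [1488, 1504) range that plat_val_to_idx() mapped
 * to bucket 349. Reported percentile values are therefore bucket
 * midpoints, not the exact latencies that were sampled.
 */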
static int double_cmp(const void *a, const void *b)
{
	const fio_fp64_t fa = *(const fio_fp64_t *) a;
	const fio_fp64_t fb = *(const fio_fp64_t *) b;
	int cmp = 0;

	if (fa.u.f > fb.u.f)
		cmp = 1;
	else if (fa.u.f < fb.u.f)
		cmp = -1;

	return cmp;
}
static unsigned int calc_clat_percentiles(unsigned int *io_u_plat,
					  unsigned long nr, fio_fp64_t *plist,
					  unsigned int **output,
					  unsigned int *maxv, unsigned int *minv)
{
	unsigned long sum = 0;
	unsigned int len = 0, i, j = 0;
	unsigned int oval_len = 0;
	unsigned int *ovals = NULL;
	int is_last = 0;

	*minv = -1U;
	*maxv = 0;

	while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
		len++;

	if (!len)
		return 0;

	/*
	 * Sort the percentile list. Note that it may already be sorted if
	 * we are using the default values, but since it's a short list this
	 * isn't a worry. Also note that this does not work for NaN values.
	 */
	qsort((void *)plist, len, sizeof(plist[0]), double_cmp);

	/*
	 * Calculate bucket values, note down max and min values
	 */
	for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
		sum += io_u_plat[i];
		while (sum >= (plist[j].u.f / 100.0 * nr)) {
			assert(plist[j].u.f <= 100.0);

			if (j == oval_len) {
				oval_len += 100;
				ovals = realloc(ovals, oval_len * sizeof(unsigned int));
			}

			ovals[j] = plat_idx_to_val(i);
			if (ovals[j] < *minv)
				*minv = ovals[j];
			if (ovals[j] > *maxv)
				*maxv = ovals[j];

			is_last = (j == len - 1);
			if (is_last)
				break;

			j++;
		}
	}

	*output = ovals;
	return len;
}
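
/*
 * Example of the scan above: with nr = 1000 completed I/Os and
 * plist[0] = 99.0, the outer loop walks the buckets while "sum"
 * accumulates the counts; the first bucket i at which sum reaches
 * 990 (99% of 1000) satisfies the while condition, and
 * plat_idx_to_val(i) becomes ovals[0], the reported 99th percentile.
 * Because sum only grows, every percentile in the sorted list is
 * resolved in a single pass over the bucket array.
 */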
/*
 * Find and display the p-th percentile of clat
 */
static void show_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
				  fio_fp64_t *plist)
{
	unsigned int len, j = 0, minv, maxv;
	unsigned int *ovals;
	int is_last, scale_down;

	len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
	if (!len)
		return;

	/*
	 * We default to usecs, but if the value range is such that we
	 * should scale down to msecs, do that.
	 */
	if (minv > 2000 && maxv > 99999) {
		scale_down = 1;
		log_info(" clat percentiles (msec):\n |");
	} else {
		scale_down = 0;
		log_info(" clat percentiles (usec):\n |");
	}

	for (j = 0; j < len; j++) {
		char fbuf[8];

		/* for formatting */
		if (j != 0 && (j % 4) == 0)
			log_info(" |");

		/* end of the list */
		is_last = (j == len - 1);

		if (plist[j].u.f < 10.0)
			sprintf(fbuf, " %2.2f", plist[j].u.f);
		else
			sprintf(fbuf, "%2.2f", plist[j].u.f);

		if (scale_down)
			ovals[j] = (ovals[j] + 999) / 1000;

		log_info(" %sth=[%5u]%c", fbuf, ovals[j], is_last ? '\n' : ',');

		if (is_last)
			break;

		if (j % 4 == 3)	/* for formatting */
			log_info("\n");
	}

	free(ovals);
}
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)
{
	double n = is->samples;

	if (is->samples == 0)
		return 0;

	*min = is->min_val;
	*max = is->max_val;

	n = (double) is->samples;
	*mean = is->mean.u.f;

	if (n > 1.0)
		*dev = sqrt(is->S.u.f / (n - 1.0));
	else
		*dev = 0;

	return 1;
}
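
/*
 * Note on the math above: is->S accumulates the sum of squared
 * deviations from the running mean (maintained by add_stat_sample()
 * and merged across jobs by sum_stat()), so sqrt(S / (n - 1)) is the
 * Bessel-corrected sample standard deviation of the recorded values.
 */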
void show_group_stats(struct group_run_stats *rs)
{
	char *p1, *p2, *p3, *p4;
	const char *ddir_str[] = { " READ", " WRITE" };
	int i;

	log_info("\nRun status group %d (all jobs):\n", rs->groupid);

	for (i = 0; i <= DDIR_WRITE; i++) {
		const int i2p = is_power_of_2(rs->kb_base);

		if (!rs->max_run[i])
			continue;

		p1 = num2str(rs->io_kb[i], 6, rs->kb_base, i2p);
		p2 = num2str(rs->agg[i], 6, rs->kb_base, i2p);
		p3 = num2str(rs->min_bw[i], 6, rs->kb_base, i2p);
		p4 = num2str(rs->max_bw[i], 6, rs->kb_base, i2p);

		log_info("%s: io=%sB, aggrb=%sB/s, minb=%sB/s, maxb=%sB/s,"
			 " mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2,
			 p3, p4, rs->min_run[i], rs->max_run[i]);

		free(p1);
		free(p2);
		free(p3);
		free(p4);
	}
}
#define ts_total_io_u(ts)	\
	((ts)->total_io_u[0] + (ts)->total_io_u[1])

static void stat_calc_dist(unsigned int *map, unsigned long total,
			   double *io_u_dist)
{
	int i;

	/*
	 * Do depth distribution calculations
	 */
	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
		if (total) {
			io_u_dist[i] = (double) map[i] / (double) total;
			io_u_dist[i] *= 100.0;
			if (io_u_dist[i] < 0.1 && map[i])
				io_u_dist[i] = 0.1;
		} else
			io_u_dist[i] = 0.0;
	}
}
static void stat_calc_lat(struct thread_stat *ts, double *dst,
			  unsigned int *src, int nr)
{
	unsigned long total = ts_total_io_u(ts);
	int i;

	/*
	 * Do latency distribution calculations
	 */
	for (i = 0; i < nr; i++) {
		if (total) {
			dst[i] = (double) src[i] / (double) total;
			dst[i] *= 100.0;
			if (dst[i] < 0.01 && src[i])
				dst[i] = 0.01;
		} else
			dst[i] = 0.0;
	}
}

static void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
}

static void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
}
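
/*
 * The 0.01% clamp above keeps rare-but-real events visible: e.g. with
 * 200000 total I/Os and src[i] = 3, the true share is 0.0015%, which
 * would otherwise print as 0.00%; clamping it to 0.01% makes clear that
 * the bucket was hit at all. stat_calc_dist() applies the same idea
 * with a 0.1% floor for the depth distribution.
 */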
static int usec_to_msec(unsigned long *min, unsigned long *max, double *mean,
			double *dev)
{
	if (*min > 1000 && *max > 1000 && *mean > 1000.0 && *dev > 1000.0) {
		*min /= 1000;
		*max /= 1000;
		*mean /= 1000.0;
		*dev /= 1000.0;
		return 0;
	}

	return 1;
}
static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
			     int ddir)
{
	const char *ddir_str[] = { "read ", "write" };
	unsigned long min, max, runt;
	unsigned long long bw, iops;
	double mean, dev;
	char *io_p, *bw_p, *iops_p;
	int i2p;

	assert(ddir_rw(ddir));

	if (!ts->runtime[ddir])
		return;

	i2p = is_power_of_2(rs->kb_base);
	runt = ts->runtime[ddir];

	/* runtimes are in msec, so 1000 * bytes / msec = bytes/sec */
	bw = (1000 * ts->io_bytes[ddir]) / runt;
	io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p);
	bw_p = num2str(bw, 6, 1, i2p);

	iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
	iops_p = num2str(iops, 6, 1, 0);

	log_info(" %s: io=%sB, bw=%sB/s, iops=%s, runt=%6llumsec\n",
		 ddir_str[ddir], io_p, bw_p, iops_p, ts->runtime[ddir]);

	free(io_p);
	free(bw_p);
	free(iops_p);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
		const char *base = "(usec)";
		char *minp, *maxp;

		if (!usec_to_msec(&min, &max, &mean, &dev))
			base = "(msec)";

		minp = num2str(min, 6, 1, 0);
		maxp = num2str(max, 6, 1, 0);

		log_info(" slat %s: min=%s, max=%s, avg=%5.02f,"
			 " stdev=%5.02f\n", base, minp, maxp, mean, dev);

		free(minp);
		free(maxp);
	}

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
		const char *base = "(usec)";
		char *minp, *maxp;

		if (!usec_to_msec(&min, &max, &mean, &dev))
			base = "(msec)";

		minp = num2str(min, 6, 1, 0);
		maxp = num2str(max, 6, 1, 0);

		log_info(" clat %s: min=%s, max=%s, avg=%5.02f,"
			 " stdev=%5.02f\n", base, minp, maxp, mean, dev);

		free(minp);
		free(maxp);
	}

	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
		const char *base = "(usec)";
		char *minp, *maxp;

		if (!usec_to_msec(&min, &max, &mean, &dev))
			base = "(msec)";

		minp = num2str(min, 6, 1, 0);
		maxp = num2str(max, 6, 1, 0);

		log_info(" lat %s: min=%s, max=%s, avg=%5.02f,"
			 " stdev=%5.02f\n", base, minp, maxp, mean, dev);

		free(minp);
		free(maxp);
	}

	if (ts->clat_percentiles) {
		show_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list);
	}
	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg = 100.0;
		const char *bw_str = "KB";

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		if (mean > 999999.9) {
			min /= 1000;
			max /= 1000;
			mean /= 1000.0;
			dev /= 1000.0;
			bw_str = "MB";
		}

		log_info(" bw (%s/s) : min=%5lu, max=%5lu, per=%3.2f%%,"
			 " avg=%5.02f, stdev=%5.02f\n", bw_str, min, max,
			 p_of_agg, mean, dev);
	}
}
static int show_lat(double *io_u_lat, int nr, const char **ranges,
		    const char *msg)
{
	int new_line = 1, i, line = 0, shown = 0;

	for (i = 0; i < nr; i++) {
		if (io_u_lat[i] <= 0.0)
			continue;
		shown = 1;
		if (new_line) {
			if (line)
				log_info("\n");
			log_info(" lat (%s) : ", msg);
			new_line = 0;
			line = 0;
		}
		if (line)
			log_info(", ");
		log_info("%s%3.2f%%", ranges[i], io_u_lat[i]);
		line++;
		if (line == 5)
			new_line = 1;
	}

	if (shown)
		log_info("\n");

	return shown;
}

static void show_lat_u(double *io_u_lat_u)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", };

	show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec");
}

static void show_lat_m(double *io_u_lat_m)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", "2000=",
				 ">=2000=", };

	show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec");
}

static void show_latencies(double *io_u_lat_u, double *io_u_lat_m)
{
	show_lat_u(io_u_lat_u);
	show_lat_m(io_u_lat_m);
}
void show_thread_status(struct thread_stat *ts, struct group_run_stats *rs)
{
	double usr_cpu, sys_cpu;
	unsigned long runtime;
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];

	if (!(ts->io_bytes[0] + ts->io_bytes[1]) &&
	    !(ts->total_io_u[0] + ts->total_io_u[1]))
		return;

	if (!ts->error) {
		log_info("%s: (groupid=%d, jobs=%d): err=%2d: pid=%d\n",
			 ts->name, ts->groupid, ts->members,
			 ts->error, (int) ts->pid);
	} else {
		log_info("%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d\n",
			 ts->name, ts->groupid, ts->members,
			 ts->error, ts->verror, (int) ts->pid);
	}

	if (strlen(ts->description))
		log_info(" Description : [%s]\n", ts->description);

	if (ts->io_bytes[DDIR_READ])
		show_ddir_status(rs, ts, DDIR_READ);
	if (ts->io_bytes[DDIR_WRITE])
		show_ddir_status(rs, ts, DDIR_WRITE);

	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);
	show_latencies(io_u_lat_u, io_u_lat_m);

	runtime = ts->total_run_time;
	if (runtime) {
		double runt = (double) runtime;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info(" cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu, majf=%lu,"
		 " minf=%lu\n", usr_cpu, sys_cpu, ts->ctx, ts->majf, ts->minf);

	stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
	log_info(" IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
		 " 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
		 io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4],
		 io_u_dist[5], io_u_dist[6]);

	stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
	log_info(" submit : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
		 io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4],
		 io_u_dist[5], io_u_dist[6]);
	stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
	log_info(" complete : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
		 io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4],
		 io_u_dist[5], io_u_dist[6]);
	log_info(" issued : total=r=%lu/w=%lu/d=%lu,"
		 " short=r=%lu/w=%lu/d=%lu\n",
		 ts->total_io_u[0], ts->total_io_u[1], ts->total_io_u[2],
		 ts->short_io_u[0], ts->short_io_u[1], ts->short_io_u[2]);

	if (ts->continue_on_error) {
		log_info(" errors : total=%lu, first_error=%d/<%s>\n",
			 ts->total_err_count, ts->first_error,
			 strerror(ts->first_error));
	}
}
static void show_ddir_status_terse(struct thread_stat *ts,
				   struct group_run_stats *rs, int ddir)
{
	unsigned long min, max;
	unsigned long long bw, iops;
	unsigned int *ovals = NULL;
	double mean, dev;
	unsigned int len, minv, maxv;
	int i;

	assert(ddir_rw(ddir));

	iops = bw = 0;
	if (ts->runtime[ddir]) {
		uint64_t runt = ts->runtime[ddir];

		bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024;
		iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
	}

	log_info(";%llu;%llu;%llu;%llu", ts->io_bytes[ddir] >> 10, bw, iops,
		 ts->runtime[ddir]);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (ts->clat_percentiles) {
		len = calc_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list, &ovals, &maxv,
					&minv);
	} else
		len = 0;

	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
		if (i >= len) {
			log_info(";0%%=0");
			continue;
		}
		log_info(";%2.2f%%=%u", ts->percentile_list[i].u.f, ovals[i]);
	}

	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (ovals)
		free(ovals);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg = 100.0;

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		log_info(";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
	} else
		log_info(";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}
static void show_thread_status_terse_v2(struct thread_stat *ts,
					struct group_run_stats *rs)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	log_info("2;%s;%d;%d", ts->name, ts->groupid, ts->error);
	/* Log Read Status */
	show_ddir_status_terse(ts, rs, 0);
	/* Log Write Status */
	show_ddir_status_terse(ts, rs, 1);

	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info(";%f%%;%f%%;%lu;%lu;%lu", usr_cpu, sys_cpu, ts->ctx, ts->majf,
		 ts->minf);

	/* Calc % distribution of IO depths, usecond, msecond latency */
	stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Only show fixed 7 I/O depth levels */
	log_info(";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
		 io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
		 io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		log_info(";%3.2f%%", io_u_lat_u[i]);
	/* Millisecond latency */
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		log_info(";%3.2f%%", io_u_lat_m[i]);
	/* Additional output if continue_on_error set - default off */
	if (ts->continue_on_error)
		log_info(";%lu;%d", ts->total_err_count, ts->first_error);
	log_info("\n");

	/* Additional output if description is set */
	if (strlen(ts->description))
		log_info(";%s", ts->description);

	log_info("\n");
}
#define FIO_TERSE_VERSION	"3"
static void show_thread_status_terse_v3(struct thread_stat *ts,
					struct group_run_stats *rs)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	log_info("%s;%s;%s;%d;%d", FIO_TERSE_VERSION, fio_version_string,
		 ts->name, ts->groupid, ts->error);
	/* Log Read Status */
	show_ddir_status_terse(ts, rs, 0);
	/* Log Write Status */
	show_ddir_status_terse(ts, rs, 1);

	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info(";%f%%;%f%%;%lu;%lu;%lu", usr_cpu, sys_cpu, ts->ctx, ts->majf,
		 ts->minf);

	/* Calc % distribution of IO depths, usecond, msecond latency */
	stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Only show fixed 7 I/O depth levels */
	log_info(";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
		 io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
		 io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		log_info(";%3.2f%%", io_u_lat_u[i]);
	/* Millisecond latency */
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		log_info(";%3.2f%%", io_u_lat_m[i]);

	/* disk util stats, if any */

	/* Additional output if continue_on_error set - default off */
	if (ts->continue_on_error)
		log_info(";%lu;%d", ts->total_err_count, ts->first_error);

	/* Additional output if description is set */
	if (strlen(ts->description))
		log_info(";%s", ts->description);

	log_info("\n");
}
static void show_thread_status_terse(struct thread_stat *ts,
				     struct group_run_stats *rs)
{
	if (terse_version == 2)
		show_thread_status_terse_v2(ts, rs);
	else if (terse_version == 3)
		show_thread_status_terse_v3(ts, rs);
	else
		log_err("fio: bad terse version!? %d\n", terse_version);
}
static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
	double mean, S;

	if (src->samples == 0)
		return;

	dst->min_val = min(dst->min_val, src->min_val);
	dst->max_val = max(dst->max_val, src->max_val);

	/*
	 * Compute new mean and S after the merge
	 * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
	 *  #Parallel_algorithm>
	 */
	if (nr == 1) {
		mean = src->mean.u.f;
		S = src->S.u.f;
	} else {
		double delta = src->mean.u.f - dst->mean.u.f;

		mean = ((src->mean.u.f * src->samples) +
			(dst->mean.u.f * dst->samples)) /
			(dst->samples + src->samples);

		S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
			(dst->samples * src->samples) /
			(dst->samples + src->samples);
	}

	dst->samples += src->samples;
	dst->mean.u.f = mean;
	dst->S.u.f = S;
}
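
/*
 * Merge example for the parallel-variance formula above: combining
 * A = {9, 11} (samples = 2, mean = 10, S = 2) with
 * B = {19, 21} (samples = 2, mean = 20, S = 2) gives
 * mean = (10*2 + 20*2) / 4 = 15 and
 * S = 2 + 2 + (20 - 10)^2 * (2*2)/4 = 104, which matches the sum of
 * squared deviations of {9, 11, 19, 21} about 15 computed directly
 * (36 + 16 + 16 + 36). calc_lat() later turns S into a stdev via
 * sqrt(S / (n - 1)).
 */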
void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (dst->max_run[i] < src->max_run[i])
			dst->max_run[i] = src->max_run[i];
		if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
			dst->min_run[i] = src->min_run[i];
		if (dst->max_bw[i] < src->max_bw[i])
			dst->max_bw[i] = src->max_bw[i];
		if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
			dst->min_bw[i] = src->min_bw[i];

		dst->io_kb[i] += src->io_kb[i];
		dst->agg[i] += src->agg[i];
	}
}
void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, int nr)
{
	int l, k;

	for (l = 0; l <= DDIR_WRITE; l++) {
		sum_stat(&dst->clat_stat[l], &src->clat_stat[l], nr);
		sum_stat(&dst->slat_stat[l], &src->slat_stat[l], nr);
		sum_stat(&dst->lat_stat[l], &src->lat_stat[l], nr);
		sum_stat(&dst->bw_stat[l], &src->bw_stat[l], nr);

		dst->io_bytes[l] += src->io_bytes[l];

		if (dst->runtime[l] < src->runtime[l])
			dst->runtime[l] = src->runtime[l];
	}

	dst->usr_time += src->usr_time;
	dst->sys_time += src->sys_time;
	dst->ctx += src->ctx;
	dst->majf += src->majf;
	dst->minf += src->minf;

	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_map[k] += src->io_u_map[k];
	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_submit[k] += src->io_u_submit[k];
	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_complete[k] += src->io_u_complete[k];
	for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
		dst->io_u_lat_u[k] += src->io_u_lat_u[k];
	for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
		dst->io_u_lat_m[k] += src->io_u_lat_m[k];

	for (k = 0; k <= 2; k++) {
		dst->total_io_u[k] += src->total_io_u[k];
		dst->short_io_u[k] += src->short_io_u[k];
	}

	for (k = 0; k <= DDIR_WRITE; k++) {
		int m;

		for (m = 0; m < FIO_IO_U_PLAT_NR; m++)
			dst->io_u_plat[k][m] += src->io_u_plat[k][m];
	}

	dst->total_run_time += src->total_run_time;
	dst->total_submit += src->total_submit;
	dst->total_complete += src->total_complete;
}
void init_group_run_stat(struct group_run_stats *gs)
{
	memset(gs, 0, sizeof(*gs));
	gs->min_bw[0] = gs->min_run[0] = ~0UL;
	gs->min_bw[1] = gs->min_run[1] = ~0UL;
}
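
/*
 * ~0UL is a "nothing seen yet" sentinel: the first real bandwidth or
 * runtime observed in show_run_stats()/sum_group_stats() is guaranteed
 * to be smaller and therefore becomes the group minimum.
 */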
void init_thread_stat(struct thread_stat *ts)
{
	int j;

	memset(ts, 0, sizeof(*ts));

	for (j = 0; j <= DDIR_WRITE; j++) {
		ts->lat_stat[j].min_val = -1UL;
		ts->clat_stat[j].min_val = -1UL;
		ts->slat_stat[j].min_val = -1UL;
		ts->bw_stat[j].min_val = -1UL;
	}
	ts->groupid = -1;
}
void show_run_stats(void)
{
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;
	struct thread_stat *threadstats, *ts;
	int i, j, nr_ts, last_ts, idx;
	int kb_base_warned = 0;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++)
		init_group_run_stat(&runstats[i]);

	/*
	 * find out how many threads stats we need. if group reporting isn't
	 * enabled, it's one-per-td.
	 */
	nr_ts = 0;
	last_ts = -1;
	for_each_td(td, i) {
		if (!td->o.group_reporting) {
			nr_ts++;
			continue;
		}
		if (last_ts == td->groupid)
			continue;

		last_ts = td->groupid;
		nr_ts++;
	}

	threadstats = malloc(nr_ts * sizeof(struct thread_stat));

	for (i = 0; i < nr_ts; i++)
		init_thread_stat(&threadstats[i]);

	j = 0;
	last_ts = -1;
	idx = 0;
	for_each_td(td, i) {
		if (idx && (!td->o.group_reporting ||
		    (td->o.group_reporting && last_ts != td->groupid))) {
			idx = 0;
			j++;
		}

		last_ts = td->groupid;

		ts = &threadstats[j];

		ts->clat_percentiles = td->o.clat_percentiles;
		if (td->o.overwrite_plist)
			memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));
		else
			memcpy(ts->percentile_list, def_percentile_list, sizeof(def_percentile_list));

		idx++;
		ts->members++;

		if (ts->groupid == -1) {
			/*
			 * These are per-group shared already
			 */
			strncpy(ts->name, td->o.name, FIO_JOBNAME_SIZE);
			if (td->o.description)
				strncpy(ts->description, td->o.description,
					FIO_JOBNAME_SIZE);
			else
				memset(ts->description, 0, FIO_JOBNAME_SIZE);

			ts->groupid = td->groupid;

			/*
			 * first pid in group, not very useful...
			 */
			ts->pid = td->pid;

			ts->kb_base = td->o.kb_base;
		} else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
			log_info("fio: kb_base differs for jobs in group, using"
				 " %u as the base\n", ts->kb_base);
			kb_base_warned = 1;
		}

		ts->continue_on_error = td->o.continue_on_error;
		ts->total_err_count += td->total_err_count;
		ts->first_error = td->first_error;
		if (!ts->error) {
			if (!td->error && td->o.continue_on_error &&
			    td->first_error) {
				ts->error = td->first_error;
				strcpy(ts->verror, td->verror);
			} else if (td->error) {
				ts->error = td->error;
				strcpy(ts->verror, td->verror);
			}
		}

		sum_thread_stats(ts, &td->ts, idx);
	}
	for (i = 0; i < nr_ts; i++) {
		unsigned long long bw;

		ts = &threadstats[i];
		rs = &runstats[ts->groupid];
		rs->kb_base = ts->kb_base;

		for (j = 0; j <= DDIR_WRITE; j++) {
			if (!ts->runtime[j])
				continue;
			if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
				rs->min_run[j] = ts->runtime[j];
			if (ts->runtime[j] > rs->max_run[j])
				rs->max_run[j] = ts->runtime[j];

			bw = 0;
			if (ts->runtime[j]) {
				unsigned long runt = ts->runtime[j];
				unsigned long long kb;

				kb = ts->io_bytes[j] / rs->kb_base;
				bw = kb * 1000 / runt;
			}
			if (bw < rs->min_bw[j])
				rs->min_bw[j] = bw;
			if (bw > rs->max_bw[j])
				rs->max_bw[j] = bw;

			rs->io_kb[j] += ts->io_bytes[j] / rs->kb_base;
		}
	}

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		if (rs->max_run[0])
			rs->agg[0] = (rs->io_kb[0] * 1000) / rs->max_run[0];
		if (rs->max_run[1])
			rs->agg[1] = (rs->io_kb[1] * 1000) / rs->max_run[1];
	}

	/*
	 * don't overwrite last signal output
	 */
	if (!terse_output)
		log_info("\n");

	for (i = 0; i < nr_ts; i++) {
		ts = &threadstats[i];
		rs = &runstats[ts->groupid];

		if (is_backend)
			fio_server_send_ts(ts, rs);
		else if (terse_output)
			show_thread_status_terse(ts, rs);
		else
			show_thread_status(ts, rs);
	}

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		rs->groupid = i;
		if (is_backend)
			fio_server_send_gs(rs);
		else if (!terse_output)
			show_group_stats(rs);
	}

	if (is_backend)
		fio_server_send_du();
	else if (!terse_output)
static void *__show_running_run_stats(void *arg)
{
	struct thread_data *td;
	unsigned long long *rt;
	struct timeval tv;
	int i;

	rt = malloc(thread_number * sizeof(unsigned long long));
	fio_gettime(&tv, NULL);

	for_each_td(td, i) {
		rt[i] = mtime_since(&td->start, &tv);
		if (td_read(td) && td->io_bytes[DDIR_READ])
			td->ts.runtime[DDIR_READ] += rt[i];
		if (td_write(td) && td->io_bytes[DDIR_WRITE])
			td->ts.runtime[DDIR_WRITE] += rt[i];

		update_rusage_stat(td);
		td->ts.io_bytes[0] = td->io_bytes[0];
		td->ts.io_bytes[1] = td->io_bytes[1];
		td->ts.total_run_time = mtime_since(&td->epoch, &tv);
	}

	show_run_stats();

	for_each_td(td, i) {
		if (td_read(td) && td->io_bytes[DDIR_READ])
			td->ts.runtime[DDIR_READ] -= rt[i];
		if (td_write(td) && td->io_bytes[DDIR_WRITE])
			td->ts.runtime[DDIR_WRITE] -= rt[i];
	}

	free(rt);
	return NULL;
}

/*
 * Called from signal handler. It _should_ be safe to just run this inline
 * in the sig handler, but we should be disturbing the system less by just
 * creating a thread to do it.
 */
void show_running_run_stats(void)
{
	pthread_t thread;

	pthread_create(&thread, NULL, __show_running_run_stats, NULL);
	pthread_detach(thread);
}
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
	double val = data;
	double delta;

	if (data > is->max_val)
		is->max_val = data;
	if (data < is->min_val)
		is->min_val = data;

	delta = val - is->mean.u.f;
	if (delta)
		is->mean.u.f += delta / (is->samples + 1.0);

	is->S.u.f += delta * (val - is->mean.u.f);

	is->samples++;
}
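
/*
 * This is Welford's online update: with n samples already folded in,
 * mean_new = mean + (x - mean) / (n + 1) and
 * S_new = S + (x - mean) * (x - mean_new), so S is always the sum of
 * squared deviations about the current mean. Example: after data
 * points 3 and 5, mean goes 3 -> 4 and S goes 0 -> 2, giving a sample
 * variance of S / (n - 1) = 2, exactly what a two-pass computation
 * over {3, 5} yields.
 */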
static void __add_log_sample(struct io_log *iolog, unsigned long val,
			     enum fio_ddir ddir, unsigned int bs,
			     unsigned long t)
{
	const int nr_samples = iolog->nr_samples;

	if (!iolog->nr_samples)
		iolog->avg_last = t;

	if (iolog->nr_samples == iolog->max_samples) {
		int new_size = sizeof(struct io_sample) * iolog->max_samples * 2;

		iolog->log = realloc(iolog->log, new_size);
		iolog->max_samples <<= 1;
	}

	iolog->log[nr_samples].val = val;
	iolog->log[nr_samples].time = t;
	iolog->log[nr_samples].ddir = ddir;
	iolog->log[nr_samples].bs = bs;
	iolog->nr_samples++;
}

static inline void reset_io_stat(struct io_stat *ios)
{
	ios->max_val = ios->min_val = ios->samples = 0;
	ios->mean.u.f = ios->S.u.f = 0;
}
static void add_log_sample(struct thread_data *td, struct io_log *iolog,
			   unsigned long val, enum fio_ddir ddir,
			   unsigned int bs)
{
	unsigned long elapsed, this_window;

	if (!ddir_rw(ddir))
		return;

	elapsed = mtime_since_now(&td->epoch);

	/*
	 * If no time averaging, just add the log sample.
	 */
	if (!iolog->avg_msec) {
		__add_log_sample(iolog, val, ddir, bs, elapsed);
		return;
	}

	/*
	 * Add the sample. If the time period has passed, then
	 * add that entry to the log and clear.
	 */
	add_stat_sample(&iolog->avg_window[ddir], val);

	/*
	 * If period hasn't passed, adding the above sample is all we
	 * need to do.
	 */
	this_window = elapsed - iolog->avg_last;
	if (this_window < iolog->avg_msec)
		return;

	/*
	 * Note an entry in the log. Use the mean from the logged samples,
	 * making sure to properly round up. Only write a log entry if we
	 * had actual samples done.
	 */
	if (iolog->avg_window[DDIR_READ].samples) {
		unsigned long mr;

		mr = iolog->avg_window[DDIR_READ].mean.u.f + 0.50;
		__add_log_sample(iolog, mr, DDIR_READ, 0, elapsed);
	}
	if (iolog->avg_window[DDIR_WRITE].samples) {
		unsigned long mw;

		mw = iolog->avg_window[DDIR_WRITE].mean.u.f + 0.50;
		__add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed);
	}

	reset_io_stat(&iolog->avg_window[DDIR_READ]);
	reset_io_stat(&iolog->avg_window[DDIR_WRITE]);
	iolog->avg_last = elapsed;
}
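
/*
 * Example of the averaging above: with avg_msec = 500, every sample in
 * a 500 ms window is folded into avg_window[ddir] instead of being
 * logged directly; once the window closes, a single entry holding the
 * window's rounded mean is written. Three bandwidth samples of 100,
 * 110 and 120 arriving within one window therefore produce one log
 * entry of 110 rather than three entries.
 */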
void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs)
{
	struct io_log *iolog;

	if (!ddir_rw(ddir))
		return;

	iolog = agg_io_log[ddir];
	__add_log_sample(iolog, val, ddir, bs, mtime_since_genesis());
}

static void add_clat_percentile_sample(struct thread_stat *ts,
				       unsigned long usec, enum fio_ddir ddir)
{
	unsigned int idx = plat_val_to_idx(usec);
	assert(idx < FIO_IO_U_PLAT_NR);

	ts->io_u_plat[ddir][idx]++;
}
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long usec, unsigned int bs)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	add_stat_sample(&ts->clat_stat[ddir], usec);

	if (td->clat_log)
		add_log_sample(td, td->clat_log, usec, ddir, bs);

	if (ts->clat_percentiles)
		add_clat_percentile_sample(ts, usec, ddir);
}

void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long usec, unsigned int bs)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	add_stat_sample(&ts->slat_stat[ddir], usec);

	if (td->slat_log)
		add_log_sample(td, td->slat_log, usec, ddir, bs);
}

void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
		    unsigned long usec, unsigned int bs)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	add_stat_sample(&ts->lat_stat[ddir], usec);

	if (td->lat_log)
		add_log_sample(td, td->lat_log, usec, ddir, bs);
}
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs,
		   struct timeval *t)
{
	struct thread_stat *ts = &td->ts;
	unsigned long spent, rate;

	if (!ddir_rw(ddir))
		return;

	spent = mtime_since(&td->bw_sample_time, t);
	if (spent < td->o.bw_avg_time)
		return;

	/*
	 * Compute both read and write rates for the interval.
	 */
	for (ddir = DDIR_READ; ddir <= DDIR_WRITE; ddir++) {
		uint64_t delta;

		delta = td->this_io_bytes[ddir] - td->stat_io_bytes[ddir];
		if (!delta)
			continue; /* No entries for interval */

		rate = delta * 1000 / spent / 1024;
		add_stat_sample(&ts->bw_stat[ddir], rate);

		if (td->bw_log)
			add_log_sample(td, td->bw_log, rate, ddir, bs);

		td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
	}

	fio_gettime(&td->bw_sample_time, NULL);
}
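
/*
 * Units of the rate above: "spent" comes from mtime_since() in
 * milliseconds, so delta * 1000 / spent is bytes/sec and the final
 * / 1024 yields KiB/s. E.g. 52428800 bytes (50 MiB) transferred in a
 * 1000 ms window logs a rate of 51200.
 */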
void add_iops_sample(struct thread_data *td, enum fio_ddir ddir,
		     struct timeval *t)
{
	struct thread_stat *ts = &td->ts;
	unsigned long spent, iops;

	if (!ddir_rw(ddir))
		return;

	spent = mtime_since(&td->iops_sample_time, t);
	if (spent < td->o.iops_avg_time)
		return;

	/*
	 * Compute both read and write IOPS for the interval.
	 */
	for (ddir = DDIR_READ; ddir <= DDIR_WRITE; ddir++) {
		uint64_t delta;

		delta = td->this_io_blocks[ddir] - td->stat_io_blocks[ddir];
		if (!delta)
			continue; /* No entries for interval */

		iops = (delta * 1000) / spent;
		add_stat_sample(&ts->iops_stat[ddir], iops);

		if (td->iops_log)
			add_log_sample(td, td->iops_log, iops, ddir, 0);

		td->stat_io_blocks[ddir] = td->this_io_blocks[ddir];
	}

	fio_gettime(&td->iops_sample_time, NULL);
}