#include "lib/ieee754.h"
#include "lib/getrusage.h"

void update_rusage_stat(struct thread_data *td)
{
	struct thread_stat *ts = &td->ts;

	fio_getrusage(&td->ru_end);
	ts->usr_time += mtime_since(&td->ru_start.ru_utime,
					&td->ru_end.ru_utime);
	ts->sys_time += mtime_since(&td->ru_start.ru_stime,
					&td->ru_end.ru_stime);
	ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
			- (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
	ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
	ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;

	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
}

/*
 * Given a latency, return the index of the corresponding bucket in
 * the structure tracking percentiles.
 *
 * (1) find the group (and error bits) that the value (latency)
 * belongs to by looking at its MSB. (2) find the bucket number in the
 * group by looking at the index bits.
 */
static unsigned int plat_val_to_idx(unsigned int val)
{
	unsigned int msb, error_bits, base, offset, idx;

	/*
	 * Find MSB starting from bit 0. __builtin_clz() is undefined
	 * for an argument of 0, so handle that case explicitly.
	 */
	if (val == 0)
		msb = 0;
	else
		msb = (sizeof(val) * 8) - __builtin_clz(val) - 1;

	/*
	 * Values with MSB <= FIO_IO_U_PLAT_BITS need no rounding: use
	 * all bits of the sample as the index.
	 */
	if (msb <= FIO_IO_U_PLAT_BITS)
		return val;

	/* Compute the number of error bits to discard */
	error_bits = msb - FIO_IO_U_PLAT_BITS;

	/* Compute the number of buckets before the group */
	base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

	/*
	 * Discard the error bits and apply the mask to find the
	 * index for the buckets in the group
	 */
	offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

	/* Make sure the index does not exceed (array size - 1) */
	idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
		(base + offset) : (FIO_IO_U_PLAT_NR - 1);

	return idx;
}
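
/*
 * Worked example, assuming the usual FIO_IO_U_PLAT_BITS == 6 (so
 * FIO_IO_U_PLAT_VAL == 64): for val = 1000, msb = 9, hence
 * error_bits = 9 - 6 = 3 and base = (3 + 1) << 6 = 256. The offset is
 * (1000 >> 3) & 63 = 61, giving idx = 256 + 61 = 317. Discarding the
 * 3 low bits bounds the relative error of any value in this group by
 * 2^3 / 2^9, i.e. under 1.6%.
 */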

/*
 * Convert the given index of the bucket array to the value
 * represented by the bucket
 */
static unsigned int plat_idx_to_val(unsigned int idx)
{
	unsigned int error_bits, k, base;

	assert(idx < FIO_IO_U_PLAT_NR);

	/*
	 * The first two groups store values exactly, so the index is
	 * the value itself.
	 */
	if (idx < (FIO_IO_U_PLAT_VAL << 1))
		return idx;

	/* Find the group and compute the minimum value of that group */
	error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
	base = 1 << (error_bits + FIO_IO_U_PLAT_BITS);

	/* Find the bucket number within the group */
	k = idx % FIO_IO_U_PLAT_VAL;

	/* Return the mean of the range of the bucket */
	return base + ((k + 0.5) * (1 << error_bits));
}
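
/*
 * Continuing the example above (FIO_IO_U_PLAT_BITS == 6):
 * plat_idx_to_val(317) finds error_bits = (317 >> 6) - 1 = 3,
 * base = 1 << (3 + 6) = 512 and k = 317 % 64 = 61, returning
 * 512 + 61.5 * 8 = 1004, the midpoint of the [1000, 1008) range that
 * bucket 317 covers. A round-trip sanity check might look like the
 * sketch below; the FIO_PLAT_SELFTEST guard is hypothetical, not an
 * existing fio build flag.
 */
#ifdef FIO_PLAT_SELFTEST
static void plat_selftest(void)
{
	unsigned int val, idx, mid;

	for (val = 1; val < 1000000; val += 7) {
		idx = plat_val_to_idx(val);
		mid = plat_idx_to_val(idx);

		assert(idx < FIO_IO_U_PLAT_NR);
		/* the bucket midpoint must stay close to the sample */
		assert(mid >= val - val / 32 && mid <= val + val / 32);
	}
}
#endif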

static int double_cmp(const void *a, const void *b)
{
	const fio_fp64_t fa = *(const fio_fp64_t *) a;
	const fio_fp64_t fb = *(const fio_fp64_t *) b;
	int cmp = 0;

	if (fa.u.f > fb.u.f)
		cmp = 1;
	else if (fa.u.f < fb.u.f)
		cmp = -1;

	return cmp;
}

static unsigned int calc_clat_percentiles(unsigned int *io_u_plat,
					  unsigned long nr, fio_fp64_t *plist,
					  unsigned int **output,
					  unsigned int *maxv,
					  unsigned int *minv)
{
	unsigned long sum = 0;
	unsigned int len, i, j = 0;
	unsigned int oval_len = 0;
	unsigned int *ovals = NULL;
	int is_last = 0;

	*minv = -1U;
	*maxv = 0;

	len = 0;
	while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
		len++;
	if (!len)
		return 0;

	/*
	 * Sort the percentile list. Note that it may already be sorted if
	 * we are using the default values, but since it's a short list this
	 * isn't a worry. Also note that this does not work for NaN values.
	 */
	if (len > 1)
		qsort((void *)plist, len, sizeof(plist[0]), double_cmp);

	/*
	 * Calculate bucket values, note down max and min values
	 */
	for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
		sum += io_u_plat[i];
		while (sum >= (plist[j].u.f / 100.0 * nr)) {
			assert(plist[j].u.f <= 100.0);

			/* grow the output array in chunks as needed */
			if (j == oval_len) {
				oval_len += 100;
				ovals = realloc(ovals, oval_len * sizeof(unsigned int));
			}

			ovals[j] = plat_idx_to_val(i);
			if (ovals[j] < *minv)
				*minv = ovals[j];
			if (ovals[j] > *maxv)
				*maxv = ovals[j];

			is_last = (j == len - 1);
			if (is_last)
				break;
			j++;
		}
	}

	*output = ovals;
	return len;
}
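
/*
 * Example: with nr = 1000 samples and plist = { 99.0, 50.0 }, the list
 * is sorted to { 50.0, 99.0 }; the scan then records the bucket value
 * at which the running sum first reaches 500 samples, continues until
 * it reaches 990, and returns len = 2 with ovals[] holding the two
 * latencies.
 */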

/*
 * Find and display the p-th percentile of clat
 */
static void show_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
				  fio_fp64_t *plist, unsigned int precision)
{
	unsigned int len, j = 0, minv, maxv;
	unsigned int *ovals = NULL;
	int is_last, per_line, scale_down;
	char fmt[32];

	len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
	if (!len)
		goto out;

	/*
	 * We default to usecs, but if the value range is such that we
	 * should scale down to msecs, do that.
	 */
	if (minv > 2000 && maxv > 99999) {
		scale_down = 1;
		log_info("    clat percentiles (msec):\n     |");
	} else {
		scale_down = 0;
		log_info("    clat percentiles (usec):\n     |");
	}

	snprintf(fmt, sizeof(fmt), "%%1.%uf", precision);
	per_line = (80 - 7) / (precision + 14);

	for (j = 0; j < len; j++) {
		char fbuf[16], *ptr = fbuf;

		/* for formatting */
		if (j != 0 && (j % per_line) == 0)
			log_info("     |");

		/* end of the list */
		is_last = (j == len - 1);

		if (plist[j].u.f < 10.0)
			ptr += sprintf(fbuf, " ");

		/* bound the write by the space left after the pad byte */
		snprintf(ptr, sizeof(fbuf) - (ptr - fbuf), fmt, plist[j].u.f);

		if (scale_down)
			ovals[j] = (ovals[j] + 999) / 1000;

		log_info(" %sth=[%5u]%c", fbuf, ovals[j], is_last ? '\n' : ',');

		if (is_last)
			break;

		if ((j % per_line) == per_line - 1) /* for formatting */
			log_info("\n");
	}

out:
	if (ovals)
		free(ovals);
}

static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
		    double *mean, double *dev)
{
	double n = (double) is->samples;

	if (is->samples == 0)
		return 0;

	*min = is->min_val;
	*max = is->max_val;
	*mean = is->mean.u.f;

	if (n > 1.0)
		*dev = sqrt(is->S.u.f / (n - 1.0));
	else
		*dev = 0;

	return 1;
}
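
/*
 * The S accumulated by add_stat_sample() below is the running sum of
 * squared distances from the mean, so S / (n - 1) is the
 * Bessel-corrected sample variance. E.g. for the two samples {1, 3}:
 * mean = 2, S = 2, dev = sqrt(2 / 1) ~= 1.41.
 */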

void show_group_stats(struct group_run_stats *rs)
{
	char *p1, *p2, *p3, *p4;
	const char *ddir_str[] = { "   READ", "  WRITE", "   TRIM" };
	int i;

	log_info("\nRun status group %d (all jobs):\n", rs->groupid);

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		const int i2p = is_power_of_2(rs->kb_base);

		if (!rs->max_run[i])
			continue;

		p1 = num2str(rs->io_kb[i], 6, rs->kb_base, i2p, 8);
		p2 = num2str(rs->agg[i], 6, rs->kb_base, i2p, rs->unit_base);
		p3 = num2str(rs->min_bw[i], 6, rs->kb_base, i2p, rs->unit_base);
		p4 = num2str(rs->max_bw[i], 6, rs->kb_base, i2p, rs->unit_base);

		log_info("%s: io=%s, aggrb=%s/s, minb=%s/s, maxb=%s/s,"
			 " mint=%llumsec, maxt=%llumsec\n",
				rs->unified_rw_rep ? "  MIXED" : ddir_str[i],
				p1, p2, p3, p4, rs->min_run[i], rs->max_run[i]);

		free(p1);
		free(p2);
		free(p3);
		free(p4);
	}
}

#define ts_total_io_u(ts)	\
	((ts)->total_io_u[DDIR_READ] + (ts)->total_io_u[DDIR_WRITE] + \
	 (ts)->total_io_u[DDIR_TRIM])

static void stat_calc_dist(unsigned int *map, unsigned long total,
			   double *io_u_dist)
{
	int i;

	/*
	 * Do depth distribution calculations
	 */
	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
		if (total) {
			io_u_dist[i] = (double) map[i] / (double) total;
			io_u_dist[i] *= 100.0;
			if (io_u_dist[i] < 0.1 && map[i])
				io_u_dist[i] = 0.1;
		} else
			io_u_dist[i] = 0.0;
	}
}

static void stat_calc_lat(struct thread_stat *ts, double *dst,
			  unsigned int *src, int nr)
{
	unsigned long total = ts_total_io_u(ts);
	int i;

	/*
	 * Do latency distribution calculations
	 */
	for (i = 0; i < nr; i++) {
		if (total) {
			dst[i] = (double) src[i] / (double) total;
			dst[i] *= 100.0;
			if (dst[i] < 0.01 && src[i])
				dst[i] = 0.01;
		} else
			dst[i] = 0.0;
	}
}

static void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
}

static void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
{
	stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
}

static int usec_to_msec(unsigned long *min, unsigned long *max, double *mean,
			double *dev)
{
	if (*min > 1000 && *max > 1000 && *mean > 1000.0 && *dev > 1000.0) {
		*min /= 1000;
		*max /= 1000;
		*mean /= 1000.0;
		*dev /= 1000.0;
		return 0;
	}

	return 1;
}
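
/*
 * usec_to_msec() returns 0 when it scaled the values down, which is
 * why callers switch their unit string to "(msec)" on a zero return.
 * Example: min=5000, max=20000 usec with mean=8000.0 and dev=1500.0
 * become 5, 20, 8.0 and 1.5 msec respectively.
 */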

static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
			     int ddir)
{
	const char *ddir_str[] = { "read ", "write", "trim" };
	unsigned long min, max, runt;
	unsigned long long bw, iops;
	double mean, dev;
	char *io_p, *bw_p, *iops_p;
	int i2p;

	assert(ddir_rw(ddir));

	if (!ts->runtime[ddir])
		return;

	i2p = is_power_of_2(rs->kb_base);
	runt = ts->runtime[ddir];

	bw = (1000 * ts->io_bytes[ddir]) / runt;
	io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p, 8);
	bw_p = num2str(bw, 6, 1, i2p, ts->unit_base);

	iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
	iops_p = num2str(iops, 6, 1, 0, 0);

	log_info("  %s: io=%s, bw=%s/s, iops=%s, runt=%6llumsec\n",
			rs->unified_rw_rep ? "mixed" : ddir_str[ddir],
			io_p, bw_p, iops_p, ts->runtime[ddir]);

	free(io_p);
	free(bw_p);
	free(iops_p);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
		const char *base = "(usec)";
		char *minp, *maxp;

		if (!usec_to_msec(&min, &max, &mean, &dev))
			base = "(msec)";

		minp = num2str(min, 6, 1, 0, 0);
		maxp = num2str(max, 6, 1, 0, 0);

		log_info("    slat %s: min=%s, max=%s, avg=%5.02f,"
			 " stdev=%5.02f\n", base, minp, maxp, mean, dev);

		free(minp);
		free(maxp);
	}

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
		const char *base = "(usec)";
		char *minp, *maxp;

		if (!usec_to_msec(&min, &max, &mean, &dev))
			base = "(msec)";

		minp = num2str(min, 6, 1, 0, 0);
		maxp = num2str(max, 6, 1, 0, 0);

		log_info("    clat %s: min=%s, max=%s, avg=%5.02f,"
			 " stdev=%5.02f\n", base, minp, maxp, mean, dev);

		free(minp);
		free(maxp);
	}

	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
		const char *base = "(usec)";
		char *minp, *maxp;

		if (!usec_to_msec(&min, &max, &mean, &dev))
			base = "(msec)";

		minp = num2str(min, 6, 1, 0, 0);
		maxp = num2str(max, 6, 1, 0, 0);

		log_info("     lat %s: min=%s, max=%s, avg=%5.02f,"
			 " stdev=%5.02f\n", base, minp, maxp, mean, dev);

		free(minp);
		free(maxp);
	}

	if (ts->clat_percentiles) {
		show_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list,
					ts->percentile_precision);
	}

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg = 100.0, fkb_base = (double) rs->kb_base;
		const char *bw_str = (rs->unit_base == 1 ? "Kbit" : "KB");

		if (rs->unit_base == 1) {
			min *= 8;
			max *= 8;
			mean *= 8.0;
			dev *= 8.0;
		}

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		if (mean > fkb_base * fkb_base) {
			min /= fkb_base;
			max /= fkb_base;
			mean /= fkb_base;
			dev /= fkb_base;
			bw_str = (rs->unit_base == 1 ? "Mbit" : "MB");
		}

		log_info("    bw (%-4s/s): min=%5lu, max=%5lu, per=%3.2f%%,"
			 " avg=%5.02f, stdev=%5.02f\n", bw_str, min, max,
			 p_of_agg, mean, dev);
	}
}
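
/*
 * Note on the three latency lines above: slat is submission latency,
 * clat is completion latency and lat is the total latency of an I/O
 * unit, so for a given unit lat is roughly slat + clat.
 */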

static int show_lat(double *io_u_lat, int nr, const char **ranges,
		    const char *msg)
{
	int new_line = 1, i, line = 0, shown = 0;

	for (i = 0; i < nr; i++) {
		if (io_u_lat[i] <= 0.0)
			continue;
		shown = 1;
		if (new_line) {
			if (line)
				log_info("\n");
			log_info("    lat (%s) : ", msg);
			new_line = 0;
			line = 0;
		}
		if (line)
			log_info(", ");
		log_info("%s%3.2f%%", ranges[i], io_u_lat[i]);
		line++;
		if (line == 5)
			new_line = 1;
	}

	if (shown)
		log_info("\n");

	return shown;
}

static void show_lat_u(double *io_u_lat_u)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", };

	show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec");
}

static void show_lat_m(double *io_u_lat_m)
{
	const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
				 "250=", "500=", "750=", "1000=", "2000=",
				 ">=2000=", };

	show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec");
}

static void show_latencies(double *io_u_lat_u, double *io_u_lat_m)
{
	show_lat_u(io_u_lat_u);
	show_lat_m(io_u_lat_m);
}

void show_thread_status(struct thread_stat *ts, struct group_run_stats *rs)
{
	double usr_cpu, sys_cpu;
	unsigned long runtime;
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	time_t time_p;
	char time_buf[64];

	if (!(ts->io_bytes[DDIR_READ] + ts->io_bytes[DDIR_WRITE] +
	    ts->io_bytes[DDIR_TRIM]) && !(ts->total_io_u[DDIR_READ] +
	    ts->total_io_u[DDIR_WRITE] + ts->total_io_u[DDIR_TRIM]))
		return;

	time(&time_p);
	os_ctime_r((const time_t *) &time_p, time_buf, sizeof(time_buf));

	if (!ts->error) {
		log_info("%s: (groupid=%d, jobs=%d): err=%2d: pid=%d: %s",
				ts->name, ts->groupid, ts->members,
				ts->error, (int) ts->pid, time_buf);
	} else {
		log_info("%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d: %s",
				ts->name, ts->groupid, ts->members,
				ts->error, ts->verror, (int) ts->pid,
				time_buf);
	}

	if (strlen(ts->description))
		log_info("  Description  : [%s]\n", ts->description);

	if (ts->io_bytes[DDIR_READ])
		show_ddir_status(rs, ts, DDIR_READ);
	if (ts->io_bytes[DDIR_WRITE])
		show_ddir_status(rs, ts, DDIR_WRITE);
	if (ts->io_bytes[DDIR_TRIM])
		show_ddir_status(rs, ts, DDIR_TRIM);

	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);
	show_latencies(io_u_lat_u, io_u_lat_m);

	runtime = ts->total_run_time;
	if (runtime) {
		double runt = (double) runtime;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info("  cpu          : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu, majf=%lu,"
		 " minf=%lu\n", usr_cpu, sys_cpu, ts->ctx, ts->majf, ts->minf);

	stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
	log_info("  IO depths    : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
		 " 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
			io_u_dist[1], io_u_dist[2],
			io_u_dist[3], io_u_dist[4],
			io_u_dist[5], io_u_dist[6]);

	stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
	log_info("     submit    : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
			io_u_dist[1], io_u_dist[2],
			io_u_dist[3], io_u_dist[4],
			io_u_dist[5], io_u_dist[6]);
	stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
	log_info("     complete  : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
		 " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
			io_u_dist[1], io_u_dist[2],
			io_u_dist[3], io_u_dist[4],
			io_u_dist[5], io_u_dist[6]);
	log_info("     issued    : total=r=%lu/w=%lu/d=%lu,"
		 " short=r=%lu/w=%lu/d=%lu\n",
			ts->total_io_u[0], ts->total_io_u[1],
			ts->total_io_u[2],
			ts->short_io_u[0], ts->short_io_u[1],
			ts->short_io_u[2]);
	if (ts->continue_on_error) {
		log_info("     errors    : total=%lu, first_error=%d/<%s>\n",
			ts->total_err_count, ts->first_error,
			strerror(ts->first_error));
	}
}

static void show_ddir_status_terse(struct thread_stat *ts,
				   struct group_run_stats *rs, int ddir)
{
	unsigned long min, max;
	unsigned long long bw, iops;
	unsigned int *ovals = NULL;
	double mean, dev;
	unsigned int len, minv, maxv;
	int i;

	assert(ddir_rw(ddir));

	iops = bw = 0;
	if (ts->runtime[ddir]) {
		uint64_t runt = ts->runtime[ddir];

		bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024;
		iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
	}

	log_info(";%llu;%llu;%llu;%llu", ts->io_bytes[ddir] >> 10, bw, iops,
						ts->runtime[ddir]);

	if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (ts->clat_percentiles) {
		len = calc_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list, &ovals, &maxv,
					&minv);
	} else
		len = 0;

	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
		if (i >= len) {
			log_info(";0%%=0");
			continue;
		}
		log_info(";%f%%=%u", ts->percentile_list[i].u.f, ovals[i]);
	}

	if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
		log_info(";%lu;%lu;%f;%f", min, max, mean, dev);
	else
		log_info(";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

	if (ovals)
		free(ovals);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		double p_of_agg = 100.0;

		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}

		log_info(";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
	} else
		log_info(";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}

static void add_ddir_status_json(struct thread_stat *ts,
		struct group_run_stats *rs, int ddir, struct json_object *parent)
{
	unsigned long min, max;
	unsigned long long bw, iops;
	unsigned int *ovals = NULL;
	double mean, dev;
	unsigned int len, minv, maxv;
	int i;
	const char *ddirname[] = { "read", "write", "trim" };
	struct json_object *dir_object, *tmp_object, *percentile_object;
	char buf[120];
	double p_of_agg = 100.0;

	assert(ddir_rw(ddir));

	if (ts->unified_rw_rep && ddir != DDIR_READ)
		return;

	dir_object = json_create_object();
	json_object_add_value_object(parent,
		ts->unified_rw_rep ? "mixed" : ddirname[ddir], dir_object);

	iops = bw = 0;
	if (ts->runtime[ddir]) {
		uint64_t runt = ts->runtime[ddir];

		bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024;
		iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
	}

	json_object_add_value_int(dir_object, "io_bytes", ts->io_bytes[ddir] >> 10);
	json_object_add_value_int(dir_object, "bw", bw);
	json_object_add_value_int(dir_object, "iops", iops);
	json_object_add_value_int(dir_object, "runtime", ts->runtime[ddir]);

	if (!calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "slat", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);

	if (!calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "clat", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);

	if (ts->clat_percentiles) {
		len = calc_clat_percentiles(ts->io_u_plat[ddir],
					ts->clat_stat[ddir].samples,
					ts->percentile_list, &ovals, &maxv,
					&minv);
	} else
		len = 0;

	percentile_object = json_create_object();
	json_object_add_value_object(tmp_object, "percentile", percentile_object);
	for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
		if (i >= len) {
			json_object_add_value_int(percentile_object, "0.00", 0);
			continue;
		}
		snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f);
		json_object_add_value_int(percentile_object, (const char *) buf, ovals[i]);
	}

	if (!calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
		min = max = 0;
		mean = dev = 0.0;
	}
	tmp_object = json_create_object();
	json_object_add_value_object(dir_object, "lat", tmp_object);
	json_object_add_value_int(tmp_object, "min", min);
	json_object_add_value_int(tmp_object, "max", max);
	json_object_add_value_float(tmp_object, "mean", mean);
	json_object_add_value_float(tmp_object, "stddev", dev);

	if (ovals)
		free(ovals);

	if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
		if (rs->agg[ddir]) {
			p_of_agg = mean * 100 / (double) rs->agg[ddir];
			if (p_of_agg > 100.0)
				p_of_agg = 100.0;
		}
	} else {
		min = max = 0;
		p_of_agg = mean = dev = 0.0;
	}
	json_object_add_value_int(dir_object, "bw_min", min);
	json_object_add_value_int(dir_object, "bw_max", max);
	json_object_add_value_float(dir_object, "bw_agg", p_of_agg);
	json_object_add_value_float(dir_object, "bw_mean", mean);
	json_object_add_value_float(dir_object, "bw_dev", dev);
}

static void show_thread_status_terse_v2(struct thread_stat *ts,
					struct group_run_stats *rs)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	/* General Info */
	log_info("2;%s;%d;%d", ts->name, ts->groupid, ts->error);
	/* Log Read Status */
	show_ddir_status_terse(ts, rs, DDIR_READ);
	/* Log Write Status */
	show_ddir_status_terse(ts, rs, DDIR_WRITE);
	/* Log Trim Status */
	show_ddir_status_terse(ts, rs, DDIR_TRIM);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info(";%f%%;%f%%;%lu;%lu;%lu", usr_cpu, sys_cpu, ts->ctx, ts->majf,
								ts->minf);

	/* Calc % distribution of IO depths, usec and msec latency */
	stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Only show fixed 7 I/O depth levels */
	log_info(";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
			io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
			io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		log_info(";%3.2f%%", io_u_lat_u[i]);
	/* Millisecond latency */
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		log_info(";%3.2f%%", io_u_lat_m[i]);
	/* Additional output if continue_on_error set - default off */
	if (ts->continue_on_error)
		log_info(";%lu;%d", ts->total_err_count, ts->first_error);
	log_info("\n");

	/* Additional output if description is set */
	if (strlen(ts->description))
		log_info(";%s", ts->description);

	log_info("\n");
}

static void show_thread_status_terse_v3_v4(struct thread_stat *ts,
					   struct group_run_stats *rs,
					   int ver)
{
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	/* General Info */
	log_info("%d;%s;%s;%d;%d", ver, fio_version_string,
					ts->name, ts->groupid, ts->error);
	/* Log Read Status */
	show_ddir_status_terse(ts, rs, DDIR_READ);
	/* Log Write Status */
	show_ddir_status_terse(ts, rs, DDIR_WRITE);
	/* Log Trim Status */
	show_ddir_status_terse(ts, rs, DDIR_TRIM);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	log_info(";%f%%;%f%%;%lu;%lu;%lu", usr_cpu, sys_cpu, ts->ctx, ts->majf,
								ts->minf);

	/* Calc % distribution of IO depths, usec and msec latency */
	stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	/* Only show fixed 7 I/O depth levels */
	log_info(";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
			io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
			io_u_dist[4], io_u_dist[5], io_u_dist[6]);

	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
		log_info(";%3.2f%%", io_u_lat_u[i]);
	/* Millisecond latency */
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
		log_info(";%3.2f%%", io_u_lat_m[i]);

	/* disk util stats, if any */
	show_disk_util(1, NULL);

	/* Additional output if continue_on_error set - default off */
	if (ts->continue_on_error)
		log_info(";%lu;%d", ts->total_err_count, ts->first_error);

	/* Additional output if description is set */
	if (strlen(ts->description))
		log_info(";%s", ts->description);

	log_info("\n");
}

static struct json_object *show_thread_status_json(struct thread_stat *ts,
						   struct group_run_stats *rs)
{
	struct json_object *root, *tmp;
	double io_u_dist[FIO_IO_U_MAP_NR];
	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
	double usr_cpu, sys_cpu;
	int i;

	root = json_create_object();
	json_object_add_value_string(root, "jobname", ts->name);
	json_object_add_value_int(root, "groupid", ts->groupid);
	json_object_add_value_int(root, "error", ts->error);

	add_ddir_status_json(ts, rs, DDIR_READ, root);
	add_ddir_status_json(ts, rs, DDIR_WRITE, root);
	add_ddir_status_json(ts, rs, DDIR_TRIM, root);

	/* CPU Usage */
	if (ts->total_run_time) {
		double runt = (double) ts->total_run_time;

		usr_cpu = (double) ts->usr_time * 100 / runt;
		sys_cpu = (double) ts->sys_time * 100 / runt;
	} else {
		usr_cpu = 0;
		sys_cpu = 0;
	}

	json_object_add_value_float(root, "usr_cpu", usr_cpu);
	json_object_add_value_float(root, "sys_cpu", sys_cpu);
	json_object_add_value_int(root, "ctx", ts->ctx);
	json_object_add_value_int(root, "majf", ts->majf);
	json_object_add_value_int(root, "minf", ts->minf);

	/* Calc % distribution of IO depths, usec and msec latency */
	stat_calc_dist(ts->io_u_map, ts_total_io_u(ts), io_u_dist);
	stat_calc_lat_u(ts, io_u_lat_u);
	stat_calc_lat_m(ts, io_u_lat_m);

	tmp = json_create_object();
	json_object_add_value_object(root, "iodepth_level", tmp);
	/* Only show fixed 7 I/O depth levels */
	for (i = 0; i < 7; i++) {
		char name[20];

		if (i < 6)
			snprintf(name, 20, "%d", 1 << i);
		else
			snprintf(name, 20, ">=%d", 1 << i);
		json_object_add_value_float(tmp, (const char *) name, io_u_dist[i]);
	}

	tmp = json_create_object();
	json_object_add_value_object(root, "latency_us", tmp);
	/* Microsecond latency */
	for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
		const char *ranges[] = { "2", "4", "10", "20", "50", "100",
					 "250", "500", "750", "1000", };
		json_object_add_value_float(tmp, ranges[i], io_u_lat_u[i]);
	}
	/* Millisecond latency */
	tmp = json_create_object();
	json_object_add_value_object(root, "latency_ms", tmp);
	for (i = 0; i < FIO_IO_U_LAT_M_NR; i++) {
		const char *ranges[] = { "2", "4", "10", "20", "50", "100",
					 "250", "500", "750", "1000", "2000",
					 ">=2000", };
		json_object_add_value_float(tmp, ranges[i], io_u_lat_m[i]);
	}

	/* Additional output if continue_on_error set - default off */
	if (ts->continue_on_error) {
		json_object_add_value_int(root, "total_err", ts->total_err_count);
		json_object_add_value_int(root, "first_error", ts->first_error);
	}

	/* Additional output if description is set */
	if (strlen(ts->description))
		json_object_add_value_string(root, "desc", ts->description);

	return root;
}

static void show_thread_status_terse(struct thread_stat *ts,
				     struct group_run_stats *rs)
{
	if (terse_version == 2)
		show_thread_status_terse_v2(ts, rs);
	else if (terse_version == 3 || terse_version == 4)
		show_thread_status_terse_v3_v4(ts, rs, terse_version);
	else
		log_err("fio: bad terse version!? %d\n", terse_version);
}

static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
	double mean, S;

	if (src->samples == 0)
		return;

	dst->min_val = min(dst->min_val, src->min_val);
	dst->max_val = max(dst->max_val, src->max_val);

	/*
	 * Compute new mean and S after the merge
	 * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
	 *  #Parallel_algorithm>
	 */
	if (nr == 1) {
		mean = src->mean.u.f;
		S = src->S.u.f;
	} else {
		double delta = src->mean.u.f - dst->mean.u.f;

		mean = ((src->mean.u.f * src->samples) +
			(dst->mean.u.f * dst->samples)) /
			(dst->samples + src->samples);

		S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
			(dst->samples * src->samples) /
			(dst->samples + src->samples);
	}

	dst->samples += src->samples;
	dst->mean.u.f = mean;
	dst->S.u.f = S;
}
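
/*
 * Worked example of the parallel merge: dst has n=2 samples with
 * mean=2, S=2 (data {1, 3}); src has n=2, mean=4, S=2 (data {3, 5}).
 * delta = 2, so mean' = (4*2 + 2*2) / 4 = 3 and
 * S' = 2 + 2 + 2^2 * (2*2)/4 = 8, which matches a direct pass over
 * {1, 3, 3, 5}: the sum of squared distances from mean 3 is
 * 4 + 0 + 0 + 4 = 8.
 */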

void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
{
	int i;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (dst->max_run[i] < src->max_run[i])
			dst->max_run[i] = src->max_run[i];
		if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
			dst->min_run[i] = src->min_run[i];
		if (dst->max_bw[i] < src->max_bw[i])
			dst->max_bw[i] = src->max_bw[i];
		if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
			dst->min_bw[i] = src->min_bw[i];

		dst->io_kb[i] += src->io_kb[i];
		dst->agg[i] += src->agg[i];
	}
}

void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, int nr)
{
	int l, k;

	for (l = 0; l < DDIR_RWDIR_CNT; l++) {
		if (!dst->unified_rw_rep) {
			sum_stat(&dst->clat_stat[l], &src->clat_stat[l], nr);
			sum_stat(&dst->slat_stat[l], &src->slat_stat[l], nr);
			sum_stat(&dst->lat_stat[l], &src->lat_stat[l], nr);
			sum_stat(&dst->bw_stat[l], &src->bw_stat[l], nr);

			dst->io_bytes[l] += src->io_bytes[l];

			if (dst->runtime[l] < src->runtime[l])
				dst->runtime[l] = src->runtime[l];
		} else {
			sum_stat(&dst->clat_stat[0], &src->clat_stat[l], nr);
			sum_stat(&dst->slat_stat[0], &src->slat_stat[l], nr);
			sum_stat(&dst->lat_stat[0], &src->lat_stat[l], nr);
			sum_stat(&dst->bw_stat[0], &src->bw_stat[l], nr);

			dst->io_bytes[0] += src->io_bytes[l];

			if (dst->runtime[0] < src->runtime[l])
				dst->runtime[0] = src->runtime[l];
		}
	}

	dst->usr_time += src->usr_time;
	dst->sys_time += src->sys_time;
	dst->ctx += src->ctx;
	dst->majf += src->majf;
	dst->minf += src->minf;

	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_map[k] += src->io_u_map[k];
	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_submit[k] += src->io_u_submit[k];
	for (k = 0; k < FIO_IO_U_MAP_NR; k++)
		dst->io_u_complete[k] += src->io_u_complete[k];
	for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
		dst->io_u_lat_u[k] += src->io_u_lat_u[k];
	for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
		dst->io_u_lat_m[k] += src->io_u_lat_m[k];

	for (k = 0; k < DDIR_RWDIR_CNT; k++) {
		if (!dst->unified_rw_rep) {
			dst->total_io_u[k] += src->total_io_u[k];
			dst->short_io_u[k] += src->short_io_u[k];
		} else {
			dst->total_io_u[0] += src->total_io_u[k];
			dst->short_io_u[0] += src->short_io_u[k];
		}
	}

	for (k = 0; k < DDIR_RWDIR_CNT; k++) {
		int m;

		for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
			if (!dst->unified_rw_rep)
				dst->io_u_plat[k][m] += src->io_u_plat[k][m];
			else
				dst->io_u_plat[0][m] += src->io_u_plat[k][m];
		}
	}

	dst->total_run_time += src->total_run_time;
	dst->total_submit += src->total_submit;
	dst->total_complete += src->total_complete;
}

void init_group_run_stat(struct group_run_stats *gs)
{
	int i;

	memset(gs, 0, sizeof(*gs));
	for (i = 0; i < DDIR_RWDIR_CNT; i++)
		gs->min_bw[i] = gs->min_run[i] = ~0UL;
}

void init_thread_stat(struct thread_stat *ts)
{
	int j;

	memset(ts, 0, sizeof(*ts));

	for (j = 0; j < DDIR_RWDIR_CNT; j++) {
		ts->lat_stat[j].min_val = -1UL;
		ts->clat_stat[j].min_val = -1UL;
		ts->slat_stat[j].min_val = -1UL;
		ts->bw_stat[j].min_val = -1UL;
	}
	ts->groupid = -1;
}

void show_run_stats(void)
{
	struct group_run_stats *runstats, *rs;
	struct thread_data *td;
	struct thread_stat *threadstats, *ts;
	int i, j, nr_ts, last_ts, idx;
	int kb_base_warned = 0;
	int unit_base_warned = 0;
	struct json_object *root = NULL;
	struct json_array *array = NULL;

	runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

	for (i = 0; i < groupid + 1; i++)
		init_group_run_stat(&runstats[i]);

	/*
	 * find out how many threads stats we need. if group reporting isn't
	 * enabled, it's one-per-td.
	 */
	nr_ts = 0;
	last_ts = -1;
	for_each_td(td, i) {
		if (!td->o.group_reporting) {
			nr_ts++;
			continue;
		}
		if (last_ts == td->groupid)
			continue;

		last_ts = td->groupid;
		nr_ts++;
	}

	threadstats = malloc(nr_ts * sizeof(struct thread_stat));

	for (i = 0; i < nr_ts; i++)
		init_thread_stat(&threadstats[i]);

	j = 0;
	last_ts = -1;
	idx = 0;
	for_each_td(td, i) {
		if (idx && (!td->o.group_reporting ||
		    (td->o.group_reporting && last_ts != td->groupid))) {
			idx = 0;
			j++;
		}

		last_ts = td->groupid;

		ts = &threadstats[j];

		ts->clat_percentiles = td->o.clat_percentiles;
		ts->percentile_precision = td->o.percentile_precision;
		memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));

		idx++;
		ts->members++;

		if (ts->groupid == -1) {
			/*
			 * These are per-group shared already
			 */
			strncpy(ts->name, td->o.name, FIO_JOBNAME_SIZE);
			if (td->o.description)
				strncpy(ts->description, td->o.description,
						FIO_JOBNAME_SIZE);
			else
				memset(ts->description, 0, FIO_JOBNAME_SIZE);

			ts->groupid = td->groupid;

			/*
			 * first pid in group, not very useful...
			 */
			ts->pid = td->pid;

			ts->kb_base = td->o.kb_base;
			ts->unit_base = td->o.unit_base;
			ts->unified_rw_rep = td->o.unified_rw_rep;
		} else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
			log_info("fio: kb_base differs for jobs in group, using"
				 " %u as the base\n", ts->kb_base);
			kb_base_warned = 1;
		} else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
			log_info("fio: unit_base differs for jobs in group, using"
				 " %u as the base\n", ts->unit_base);
			unit_base_warned = 1;
		}

		ts->continue_on_error = td->o.continue_on_error;
		ts->total_err_count += td->total_err_count;
		ts->first_error = td->first_error;

		if (!td->error && td->o.continue_on_error &&
		    td->first_error) {
			ts->error = td->first_error;
			strcpy(ts->verror, td->verror);
		} else if (td->error) {
			ts->error = td->error;
			strcpy(ts->verror, td->verror);
		}

		sum_thread_stats(ts, &td->ts, idx);
	}

	for (i = 0; i < nr_ts; i++) {
		unsigned long long bw;

		ts = &threadstats[i];
		rs = &runstats[ts->groupid];
		rs->kb_base = ts->kb_base;
		rs->unit_base = ts->unit_base;
		rs->unified_rw_rep += ts->unified_rw_rep;

		for (j = 0; j < DDIR_RWDIR_CNT; j++) {
			if (!ts->runtime[j])
				continue;
			if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
				rs->min_run[j] = ts->runtime[j];
			if (ts->runtime[j] > rs->max_run[j])
				rs->max_run[j] = ts->runtime[j];

			bw = 0;
			if (ts->runtime[j]) {
				unsigned long runt = ts->runtime[j];
				unsigned long long kb;

				kb = ts->io_bytes[j] / rs->kb_base;
				bw = kb * 1000 / runt;
			}
			if (bw < rs->min_bw[j])
				rs->min_bw[j] = bw;
			if (bw > rs->max_bw[j])
				rs->max_bw[j] = bw;

			rs->io_kb[j] += ts->io_bytes[j] / rs->kb_base;
		}
	}

	for (i = 0; i < groupid + 1; i++) {
		int ddir;

		rs = &runstats[i];

		for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
			if (rs->max_run[ddir])
				rs->agg[ddir] = (rs->io_kb[ddir] * 1000) /
						rs->max_run[ddir];
		}
	}

	/*
	 * don't overwrite last signal output
	 */
	if (output_format == FIO_OUTPUT_NORMAL)
		log_info("\n");
	else if (output_format == FIO_OUTPUT_JSON) {
		root = json_create_object();
		json_object_add_value_string(root, "fio version", fio_version_string);
		array = json_create_array();
		json_object_add_value_array(root, "jobs", array);
	}

	for (i = 0; i < nr_ts; i++) {
		ts = &threadstats[i];
		rs = &runstats[ts->groupid];

		if (is_backend)
			fio_server_send_ts(ts, rs);
		else if (output_format == FIO_OUTPUT_TERSE)
			show_thread_status_terse(ts, rs);
		else if (output_format == FIO_OUTPUT_JSON) {
			struct json_object *tmp = show_thread_status_json(ts, rs);
			json_array_add_value_object(array, tmp);
		} else
			show_thread_status(ts, rs);
	}
	if (output_format == FIO_OUTPUT_JSON) {
		/* disk util stats, if any */
		show_disk_util(1, root);

		show_idle_prof_stats(FIO_OUTPUT_JSON, root);

		json_print_object(root);
		log_info("\n");
		json_free_object(root);
	}

	for (i = 0; i < groupid + 1; i++) {
		rs = &runstats[i];

		rs->groupid = i;
		if (is_backend)
			fio_server_send_gs(rs);
		else if (output_format == FIO_OUTPUT_NORMAL)
			show_group_stats(rs);
	}

	if (is_backend)
		fio_server_send_du();
	else if (output_format == FIO_OUTPUT_NORMAL) {
		show_disk_util(0, NULL);
		show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL);
	}

	free(runstats);
	free(threadstats);
}

static void *__show_running_run_stats(void *arg)
{
	struct thread_data *td;
	unsigned long long *rt;
	struct timeval tv;
	int i;

	rt = malloc(thread_number * sizeof(unsigned long long));
	fio_gettime(&tv, NULL);

	for_each_td(td, i) {
		rt[i] = mtime_since(&td->start, &tv);
		if (td_read(td) && td->io_bytes[DDIR_READ])
			td->ts.runtime[DDIR_READ] += rt[i];
		if (td_write(td) && td->io_bytes[DDIR_WRITE])
			td->ts.runtime[DDIR_WRITE] += rt[i];
		if (td_trim(td) && td->io_bytes[DDIR_TRIM])
			td->ts.runtime[DDIR_TRIM] += rt[i];

		td->update_rusage = 1;
		td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
		td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
		td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
		td->ts.total_run_time = mtime_since(&td->epoch, &tv);
	}

	for_each_td(td, i) {
		if (td->rusage_sem) {
			td->update_rusage = 1;
			fio_mutex_down(td->rusage_sem);
		}
		td->update_rusage = 0;
	}

	show_run_stats();

	for_each_td(td, i) {
		if (td_read(td) && td->io_bytes[DDIR_READ])
			td->ts.runtime[DDIR_READ] -= rt[i];
		if (td_write(td) && td->io_bytes[DDIR_WRITE])
			td->ts.runtime[DDIR_WRITE] -= rt[i];
		if (td_trim(td) && td->io_bytes[DDIR_TRIM])
			td->ts.runtime[DDIR_TRIM] -= rt[i];
	}

	free(rt);
	return NULL;
}

/*
 * Called from signal handler. It _should_ be safe to just run this inline
 * in the sig handler, but we disturb the system less by creating a thread
 * to do it.
 */
void show_running_run_stats(void)
{
	pthread_t thread;

	pthread_create(&thread, NULL, __show_running_run_stats, NULL);
	pthread_detach(thread);
}

static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
	double val = data;
	double delta;

	if (data > is->max_val)
		is->max_val = data;
	if (data < is->min_val)
		is->min_val = data;

	delta = val - is->mean.u.f;
	if (delta) {
		is->mean.u.f += delta / (is->samples + 1.0);
		is->S.u.f += delta * (val - is->mean.u.f);
	}

	is->samples++;
}
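
/*
 * This is Welford's online algorithm: the mean and S (the sum of
 * squared distances from the running mean) are updated per sample, so
 * no sample history needs to be kept. E.g. starting from zero and
 * adding {1, 3}: after 1, mean=1, S=0; after 3, delta=2, mean=2 and
 * S=2*(3-2)=2, which calc_lat() turns into dev=sqrt(2/1).
 */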

static void __add_log_sample(struct io_log *iolog, unsigned long val,
			     enum fio_ddir ddir, unsigned int bs,
			     unsigned long t)
{
	const int nr_samples = iolog->nr_samples;

	if (!iolog->nr_samples)
		iolog->avg_last = t;

	/* grow the log by doubling, keeping appends amortized O(1) */
	if (iolog->nr_samples == iolog->max_samples) {
		int new_size = sizeof(struct io_sample) * iolog->max_samples * 2;

		iolog->log = realloc(iolog->log, new_size);
		iolog->max_samples <<= 1;
	}

	iolog->log[nr_samples].val = val;
	iolog->log[nr_samples].time = t;
	iolog->log[nr_samples].ddir = ddir;
	iolog->log[nr_samples].bs = bs;
	iolog->nr_samples++;
}

static inline void reset_io_stat(struct io_stat *ios)
{
	ios->max_val = ios->min_val = ios->samples = 0;
	ios->mean.u.f = ios->S.u.f = 0;
}

static void add_log_sample(struct thread_data *td, struct io_log *iolog,
			   unsigned long val, enum fio_ddir ddir,
			   unsigned int bs)
{
	unsigned long elapsed, this_window;

	if (!ddir_rw(ddir))
		return;

	elapsed = mtime_since_now(&td->epoch);

	/*
	 * If no time averaging, just add the log sample.
	 */
	if (!iolog->avg_msec) {
		__add_log_sample(iolog, val, ddir, bs, elapsed);
		return;
	}

	/*
	 * Add the sample. If the time period has passed, then
	 * add that entry to the log and clear.
	 */
	add_stat_sample(&iolog->avg_window[ddir], val);

	/*
	 * If period hasn't passed, adding the above sample is all we
	 * need to do.
	 */
	this_window = elapsed - iolog->avg_last;
	if (this_window < iolog->avg_msec)
		return;

	/*
	 * Note an entry in the log. Use the mean from the logged samples,
	 * making sure to properly round up. Only write a log entry if we
	 * had actual samples done.
	 */
	if (iolog->avg_window[DDIR_READ].samples) {
		unsigned long mr;

		mr = iolog->avg_window[DDIR_READ].mean.u.f + 0.50;
		__add_log_sample(iolog, mr, DDIR_READ, 0, elapsed);
	}
	if (iolog->avg_window[DDIR_WRITE].samples) {
		unsigned long mw;

		mw = iolog->avg_window[DDIR_WRITE].mean.u.f + 0.50;
		__add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed);
	}
	if (iolog->avg_window[DDIR_TRIM].samples) {
		unsigned long mt;

		mt = iolog->avg_window[DDIR_TRIM].mean.u.f + 0.50;
		__add_log_sample(iolog, mt, DDIR_TRIM, 0, elapsed);
	}

	reset_io_stat(&iolog->avg_window[DDIR_READ]);
	reset_io_stat(&iolog->avg_window[DDIR_WRITE]);
	reset_io_stat(&iolog->avg_window[DDIR_TRIM]);
	iolog->avg_last = elapsed;
}
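
/*
 * Example of the averaging path: with avg_msec=500, every sample
 * inside a 500 msec window only updates avg_window[ddir]; once
 * elapsed - avg_last reaches 500, a single rounded mean per data
 * direction is written to the log and the window stats are reset.
 */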

void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs)
{
	struct io_log *iolog;

	if (!ddir_rw(ddir))
		return;

	iolog = agg_io_log[ddir];
	__add_log_sample(iolog, val, ddir, bs, mtime_since_genesis());
}

static void add_clat_percentile_sample(struct thread_stat *ts,
				       unsigned long usec, enum fio_ddir ddir)
{
	unsigned int idx = plat_val_to_idx(usec);

	assert(idx < FIO_IO_U_PLAT_NR);

	ts->io_u_plat[ddir][idx]++;
}

void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long usec, unsigned int bs)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	add_stat_sample(&ts->clat_stat[ddir], usec);

	if (td->clat_log)
		add_log_sample(td, td->clat_log, usec, ddir, bs);

	if (ts->clat_percentiles)
		add_clat_percentile_sample(ts, usec, ddir);
}

void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long usec, unsigned int bs)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	add_stat_sample(&ts->slat_stat[ddir], usec);

	if (td->slat_log)
		add_log_sample(td, td->slat_log, usec, ddir, bs);
}

void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
		    unsigned long usec, unsigned int bs)
{
	struct thread_stat *ts = &td->ts;

	if (!ddir_rw(ddir))
		return;

	add_stat_sample(&ts->lat_stat[ddir], usec);

	if (td->lat_log)
		add_log_sample(td, td->lat_log, usec, ddir, bs);
}

void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs,
		   struct timeval *t)
{
	struct thread_stat *ts = &td->ts;
	unsigned long spent, rate;

	if (!ddir_rw(ddir))
		return;

	spent = mtime_since(&td->bw_sample_time, t);
	if (spent < td->o.bw_avg_time)
		return;

	/*
	 * Compute the bandwidth for each data direction over the interval.
	 */
	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
		uint64_t delta;

		delta = td->this_io_bytes[ddir] - td->stat_io_bytes[ddir];
		if (!delta)
			continue; /* No entries for interval */

		rate = delta * 1000 / spent / 1024;
		add_stat_sample(&ts->bw_stat[ddir], rate);

		if (td->bw_log)
			add_log_sample(td, td->bw_log, rate, ddir, bs);

		td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
	}

	fio_gettime(&td->bw_sample_time, NULL);
}
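
/*
 * Rate arithmetic above: delta is in bytes and spent in msec, so
 * delta * 1000 / spent is bytes/sec and the final / 1024 yields KB/s.
 * E.g. 524288 bytes over a 500 msec window gives
 * 524288 * 1000 / 500 / 1024 = 1024 KB/s.
 */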

void add_iops_sample(struct thread_data *td, enum fio_ddir ddir,
		     struct timeval *t)
{
	struct thread_stat *ts = &td->ts;
	unsigned long spent, iops;

	if (!ddir_rw(ddir))
		return;

	spent = mtime_since(&td->iops_sample_time, t);
	if (spent < td->o.iops_avg_time)
		return;

	/*
	 * Compute the IOPS rate for each data direction over the interval.
	 */
	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
		uint64_t delta;

		delta = td->this_io_blocks[ddir] - td->stat_io_blocks[ddir];
		if (!delta)
			continue; /* No entries for interval */

		iops = (delta * 1000) / spent;
		add_stat_sample(&ts->iops_stat[ddir], iops);

		if (td->iops_log)
			add_log_sample(td, td->iops_log, iops, ddir, 0);

		td->stat_io_blocks[ddir] = td->this_io_blocks[ddir];
	}

	fio_gettime(&td->iops_sample_time, NULL);
}