#include "lib/ieee754.h"
#include "lib/getrusage.h"
#include "lib/output_buffer.h"
#include "helper_thread.h"

struct fio_mutex *stat_mutex;

void clear_rusage_stat(struct thread_data *td)
{
    struct thread_stat *ts = &td->ts;

    fio_getrusage(&td->ru_start);
    ts->usr_time = ts->sys_time = 0;
    ts->ctx = 0;
    ts->minf = ts->majf = 0;
}
void update_rusage_stat(struct thread_data *td)
{
    struct thread_stat *ts = &td->ts;

    fio_getrusage(&td->ru_end);
    ts->usr_time += mtime_since(&td->ru_start.ru_utime,
                    &td->ru_end.ru_utime);
    ts->sys_time += mtime_since(&td->ru_start.ru_stime,
                    &td->ru_end.ru_stime);
    ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
            - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
    ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
    ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;

    memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
}
/*
 * Given a latency, return the index of the corresponding bucket in
 * the structure tracking percentiles.
 *
 * (1) find the group (and error bits) that the value (latency)
 * belongs to by looking at its MSB. (2) find the bucket number in the
 * group by looking at the index bits.
 */
static unsigned int plat_val_to_idx(unsigned int val)
{
    unsigned int msb, error_bits, base, offset, idx;

    /* Find MSB starting from bit 0 */
    if (val == 0)
        msb = 0;
    else
        msb = (sizeof(val) * 8) - __builtin_clz(val) - 1;

    /*
     * A value this small cannot be rounded off; use all bits of the
     * sample as the index
     */
    if (msb <= FIO_IO_U_PLAT_BITS)
        return val;

    /* Compute the number of error bits to discard */
    error_bits = msb - FIO_IO_U_PLAT_BITS;

    /* Compute the number of buckets before the group */
    base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;

    /*
     * Discard the error bits and apply the mask to find the
     * index for the buckets in the group
     */
    offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);

    /* Make sure the index does not exceed (array size - 1) */
    idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
        (base + offset) : (FIO_IO_U_PLAT_NR - 1);

    return idx;
}
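
/*
 * Worked example, as a sketch (assuming FIO_IO_U_PLAT_BITS == 6 and
 * FIO_IO_U_PLAT_VAL == 64 from stat.h): for val = 1200, msb = 10, so
 * error_bits = 10 - 6 = 4 and base = (4 + 1) << 6 = 320. The offset is
 * 63 & (1200 >> 4) = 11, giving idx = 331. Every latency in
 * [1200, 1216) shares this bucket, so the rounding error is bounded by
 * 2^error_bits = 16.
 */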
/*
 * Convert the given index of the bucket array to the value
 * represented by the bucket
 */
static unsigned int plat_idx_to_val(unsigned int idx)
{
    unsigned int error_bits, k, base;

    assert(idx < FIO_IO_U_PLAT_NR);

    /*
     * Indexes in the first two groups were stored without rounding;
     * the index is the value itself
     */
    if (idx < (FIO_IO_U_PLAT_VAL << 1))
        return idx;

    /* Find the group and compute the minimum value of that group */
    error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
    base = 1 << (error_bits + FIO_IO_U_PLAT_BITS);

    /* Find the bucket number within the group */
    k = idx % FIO_IO_U_PLAT_VAL;

    /* Return the midpoint of the bucket's range */
    return base + ((k + 0.5) * (1 << error_bits));
}
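
/*
 * Worked example, continuing the sketch above: for idx = 331,
 * error_bits = (331 >> 6) - 1 = 4, base = 1 << 10 = 1024 and
 * k = 331 % 64 = 11, so the function returns 1024 + 11.5 * 16 = 1208,
 * the midpoint of the [1200, 1216) bucket that plat_val_to_idx(1200)
 * maps into.
 */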
static int double_cmp(const void *a, const void *b)
{
    const fio_fp64_t fa = *(const fio_fp64_t *) a;
    const fio_fp64_t fb = *(const fio_fp64_t *) b;
    int cmp = 0;

    if (fa.u.f > fb.u.f)
        cmp = 1;
    else if (fa.u.f < fb.u.f)
        cmp = -1;

    return cmp;
}

unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
                   fio_fp64_t *plist, unsigned int **output,
                   unsigned int *maxv, unsigned int *minv)
{
    unsigned long sum = 0;
    unsigned int len, i, j = 0;
    unsigned int oval_len = 0;
    unsigned int *ovals = NULL;
    int is_last = 0;

    *minv = -1U;
    *maxv = 0;

    len = 0;
    while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
        len++;

    /*
     * Sort the percentile list. Note that it may already be sorted if
     * we are using the default values, but since it's a short list this
     * isn't a worry. Also note that this does not work for NaN values.
     */
    qsort((void *)plist, len, sizeof(plist[0]), double_cmp);

    /*
     * Calculate bucket values, note down max and min values
     */
    for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
        sum += io_u_plat[i];
        while (sum >= (plist[j].u.f / 100.0 * nr)) {
            assert(plist[j].u.f <= 100.0);

            if (j == oval_len) {
                oval_len += 100;
                ovals = realloc(ovals, oval_len * sizeof(unsigned int));
            }

            ovals[j] = plat_idx_to_val(i);
            if (ovals[j] < *minv)
                *minv = ovals[j];
            if (ovals[j] > *maxv)
                *maxv = ovals[j];

            is_last = (j == len - 1);
            if (is_last)
                break;

            j++;
        }
    }

    *output = ovals;
    return len;
}
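
/*
 * Usage sketch (illustrative, not called anywhere in fio itself): the
 * caller passes a NULL-initialized output pointer and owns the
 * returned array:
 *
 *	unsigned int *ovals = NULL, maxv, minv;
 *	unsigned int n = calc_clat_percentiles(plat, nr, plist, &ovals,
 *						&maxv, &minv);
 *	for (unsigned int k = 0; k < n; k++)
 *		log_info("%.2fth=%u\n", plist[k].u.f, ovals[k]);
 *	free(ovals);
 */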
/*
 * Find and display the requested percentiles of clat
 */
static void show_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
                  fio_fp64_t *plist, unsigned int precision,
                  struct buf_output *out)
{
    unsigned int len, j = 0, minv, maxv;
    unsigned int *ovals = NULL;
    int is_last, per_line, scale_down;
    char fmt[32];

    len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);

    /*
     * We default to usecs, but if the value range is such that we
     * should scale down to msecs, do that.
     */
    if (minv > 2000 && maxv > 99999) {
        scale_down = 1;
        log_buf(out, " clat percentiles (msec):\n |");
    } else {
        scale_down = 0;
        log_buf(out, " clat percentiles (usec):\n |");
    }

    snprintf(fmt, sizeof(fmt), "%%1.%uf", precision);
    per_line = (80 - 7) / (precision + 14);

    for (j = 0; j < len; j++) {
        char fbuf[16], *ptr = fbuf;

        if (j != 0 && (j % per_line) == 0)

        /* end of the list */
        is_last = (j == len - 1);

        if (plist[j].u.f < 10.0)
            ptr += sprintf(fbuf, " ");

        snprintf(ptr, sizeof(fbuf) - (ptr - fbuf), fmt, plist[j].u.f);

        if (scale_down)
            ovals[j] = (ovals[j] + 999) / 1000;

        log_buf(out, " %sth=[%5u]%c", fbuf, ovals[j], is_last ? '\n' : ',');

        if ((j % per_line) == per_line - 1) /* for formatting */
            log_buf(out, "\n");
    }

    free(ovals);
int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
         double *mean, double *dev)
    double n = (double) is->samples;

    *mean = is->mean.u.f;

    *dev = sqrt(is->S.u.f / (n - 1.0));
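
/*
 * The S member accumulates the running sum of squared deviations from
 * the mean (see add_stat_sample() below), so the expression above is
 * the sample standard deviation with Bessel's correction:
 *
 *	dev = sqrt(S / (n - 1))
 *
 * It needs at least two samples to be meaningful.
 */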
void show_group_stats(struct group_run_stats *rs, struct buf_output *out)
    char *p1, *p2, *p3, *p4;
    const char *str[] = { " READ", " WRITE", " TRIM" };

    log_buf(out, "\nRun status group %d (all jobs):\n", rs->groupid);

    for (i = 0; i < DDIR_RWDIR_CNT; i++) {
        const int i2p = is_power_of_2(rs->kb_base);

        p1 = num2str(rs->io_kb[i], 6, rs->kb_base, i2p, 8);
        p2 = num2str(rs->agg[i], 6, rs->kb_base, i2p, rs->unit_base);
        p3 = num2str(rs->min_bw[i], 6, rs->kb_base, i2p, rs->unit_base);
        p4 = num2str(rs->max_bw[i], 6, rs->kb_base, i2p, rs->unit_base);

        log_buf(out, "%s: io=%s, aggrb=%s/s, minb=%s/s, maxb=%s/s,"
             " mint=%llumsec, maxt=%llumsec\n",
                rs->unified_rw_rep ? " MIXED" : str[i],
                p1, p2, p3, p4,
                (unsigned long long) rs->min_run[i],
                (unsigned long long) rs->max_run[i]);
void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist)
    /*
     * Do depth distribution calculations
     */
    for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
        io_u_dist[i] = (double) map[i] / (double) total;
        io_u_dist[i] *= 100.0;
        if (io_u_dist[i] < 0.1 && map[i])
            io_u_dist[i] = 0.1;

static void stat_calc_lat(struct thread_stat *ts, double *dst,
              unsigned int *src, int nr)
    unsigned long total = ddir_rw_sum(ts->total_io_u);

    /*
     * Do latency distribution calculations
     */
    for (i = 0; i < nr; i++) {
        dst[i] = (double) src[i] / (double) total;
        dst[i] *= 100.0;
        if (dst[i] < 0.01 && src[i])
            dst[i] = 0.01;

void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
    stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);

void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
    stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
static void display_lat(const char *name, unsigned long min, unsigned long max,
            double mean, double dev, struct buf_output *out)
    const char *base = "(usec)";
    char *minp, *maxp;

    if (!usec_to_msec(&min, &max, &mean, &dev))
        base = "(msec)";

    minp = num2str(min, 6, 1, 0, 0);
    maxp = num2str(max, 6, 1, 0, 0);

    log_buf(out, " %s %s: min=%s, max=%s, avg=%5.02f,"
         " stdev=%5.02f\n", name, base, minp, maxp, mean, dev);
static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
                 int ddir, struct buf_output *out)
    const char *str[] = { "read ", "write", "trim" };
    unsigned long min, max, runt;
    unsigned long long bw, iops;
    int i2p;
    char *io_p, *bw_p, *iops_p;
    double mean, dev;

    assert(ddir_rw(ddir));

    if (!ts->runtime[ddir])
        return;

    i2p = is_power_of_2(rs->kb_base);
    runt = ts->runtime[ddir];

    bw = (1000 * ts->io_bytes[ddir]) / runt;
    io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p, 8);
    bw_p = num2str(bw, 6, 1, i2p, ts->unit_base);

    iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
    iops_p = num2str(iops, 6, 1, 0, 0);

    log_buf(out, " %s: io=%s, bw=%s/s, iops=%s, runt=%6llumsec\n",
        rs->unified_rw_rep ? "mixed" : str[ddir],
        io_p, bw_p, iops_p,
        (unsigned long long) ts->runtime[ddir]);

    if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
        display_lat("slat", min, max, mean, dev, out);
    if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
        display_lat("clat", min, max, mean, dev, out);
    if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
        display_lat(" lat", min, max, mean, dev, out);

    if (ts->clat_percentiles) {
        show_clat_percentiles(ts->io_u_plat[ddir],
                    ts->clat_stat[ddir].samples,
                    ts->percentile_list,
                    ts->percentile_precision, out);

    if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
        double p_of_agg = 100.0, fkb_base = (double)rs->kb_base;
        const char *bw_str = (rs->unit_base == 1 ? "Kbit" : "KB");

        if (rs->unit_base == 1) {

        p_of_agg = mean * 100 / (double) rs->agg[ddir];
        if (p_of_agg > 100.0)
            p_of_agg = 100.0;

        if (mean > fkb_base * fkb_base) {
            bw_str = (rs->unit_base == 1 ? "Mbit" : "MB");

        log_buf(out, " bw (%-4s/s): min=%5lu, max=%5lu, per=%3.2f%%,"
             " avg=%5.02f, stdev=%5.02f\n", bw_str, min, max,
            p_of_agg, mean, dev);
static int show_lat(double *io_u_lat, int nr, const char **ranges,
            const char *msg, struct buf_output *out)
    int new_line = 1, i, line = 0, shown = 0;

    for (i = 0; i < nr; i++) {
        if (io_u_lat[i] <= 0.0)
            continue;

        log_buf(out, " lat (%s) : ", msg);

        log_buf(out, "%s%3.2f%%", ranges[i], io_u_lat[i]);

static void show_lat_u(double *io_u_lat_u, struct buf_output *out)
    const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
                 "250=", "500=", "750=", "1000=", };

    show_lat(io_u_lat_u, FIO_IO_U_LAT_U_NR, ranges, "usec", out);

static void show_lat_m(double *io_u_lat_m, struct buf_output *out)
    const char *ranges[] = { "2=", "4=", "10=", "20=", "50=", "100=",
                 "250=", "500=", "750=", "1000=", "2000=",
                 ">=2000=", };

    show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec", out);

static void show_latencies(struct thread_stat *ts, struct buf_output *out)
{
    double io_u_lat_u[FIO_IO_U_LAT_U_NR];
    double io_u_lat_m[FIO_IO_U_LAT_M_NR];

    stat_calc_lat_u(ts, io_u_lat_u);
    stat_calc_lat_m(ts, io_u_lat_m);

    show_lat_u(io_u_lat_u, out);
    show_lat_m(io_u_lat_m, out);
}
static int block_state_category(int block_state)
{
    switch (block_state) {
    case BLOCK_STATE_UNINIT:
        return 0;
    case BLOCK_STATE_TRIMMED:
    case BLOCK_STATE_WRITTEN:
        return 1;
    case BLOCK_STATE_WRITE_FAILURE:
    case BLOCK_STATE_TRIM_FAILURE:
        return 2;
    default:
        /* Silence compile warning on some BSDs and have a return */
        assert(0);
        return -1;
    }
}

static int compare_block_infos(const void *bs1, const void *bs2)
{
    uint32_t block1 = *(uint32_t *)bs1;
    uint32_t block2 = *(uint32_t *)bs2;
    int state1 = BLOCK_INFO_STATE(block1);
    int state2 = BLOCK_INFO_STATE(block2);
    int bscat1 = block_state_category(state1);
    int bscat2 = block_state_category(state2);
    int cycles1 = BLOCK_INFO_TRIMS(block1);
    int cycles2 = BLOCK_INFO_TRIMS(block2);

    if (bscat1 < bscat2)
        return -1;
    if (bscat1 > bscat2)
        return 1;

    if (cycles1 < cycles2)
        return -1;
    if (cycles1 > cycles2)
        return 1;

    assert(block1 == block2);
    return 0;
}
static int calc_block_percentiles(int nr_block_infos, uint32_t *block_infos,
                  fio_fp64_t *plist, unsigned int **percentiles,
                  unsigned int *types)
{
    int len, i, nr_uninit;

    qsort(block_infos, nr_block_infos, sizeof(uint32_t), compare_block_infos);

    len = 0;
    while (len < FIO_IO_U_LIST_MAX_LEN && plist[len].u.f != 0.0)
        len++;

    /*
     * Sort the percentile list. Note that it may already be sorted if
     * we are using the default values, but since it's a short list this
     * isn't a worry. Also note that this does not work for NaN values.
     */
    qsort((void *)plist, len, sizeof(plist[0]), double_cmp);

    /* Start only after the uninit entries end */
    for (nr_uninit = 0;
         nr_uninit < nr_block_infos
         && BLOCK_INFO_STATE(block_infos[nr_uninit]) == BLOCK_STATE_UNINIT;
         nr_uninit++)
        ;

    if (nr_uninit == nr_block_infos)
        return 0;

    *percentiles = calloc(len, sizeof(**percentiles));

    for (i = 0; i < len; i++) {
        int idx = (plist[i].u.f * (nr_block_infos - nr_uninit) / 100)
                + nr_uninit;
        (*percentiles)[i] = BLOCK_INFO_TRIMS(block_infos[idx]);
    }

    memset(types, 0, sizeof(*types) * BLOCK_STATE_COUNT);
    for (i = 0; i < nr_block_infos; i++)
        types[BLOCK_INFO_STATE(block_infos[i])]++;

    return len;
}
static const char *block_state_names[] = {
    [BLOCK_STATE_UNINIT] = "unwritten",
    [BLOCK_STATE_TRIMMED] = "trimmed",
    [BLOCK_STATE_WRITTEN] = "written",
    [BLOCK_STATE_TRIM_FAILURE] = "trim failure",
    [BLOCK_STATE_WRITE_FAILURE] = "write failure",
};

static void show_block_infos(int nr_block_infos, uint32_t *block_infos,
                 fio_fp64_t *plist, struct buf_output *out)
    int len, pos = 0, i;
    unsigned int *percentiles = NULL;
    unsigned int block_state_counts[BLOCK_STATE_COUNT];

    len = calc_block_percentiles(nr_block_infos, block_infos, plist,
                     &percentiles, block_state_counts);

    log_buf(out, " block lifetime percentiles :\n |");

    for (i = 0; i < len; i++) {
        uint32_t block_info = percentiles[i];
#define LINE_LENGTH 75
        char str[LINE_LENGTH];
        int strln = snprintf(str, LINE_LENGTH, " %3.2fth=%u%c",
                     plist[i].u.f, block_info,
                     i == len - 1 ? '\n' : ',');
        assert(strln < LINE_LENGTH);
        if (pos + strln > LINE_LENGTH) {
            pos = 0;
            log_buf(out, "\n |");
        }
        log_buf(out, "%s", str);
        pos += strln;

    log_buf(out, " states :");
    for (i = 0; i < BLOCK_STATE_COUNT; i++)
        log_buf(out, " %s=%u%c",
            block_state_names[i], block_state_counts[i],
            i == BLOCK_STATE_COUNT - 1 ? '\n' : ',');
static void show_thread_status_normal(struct thread_stat *ts,
                      struct group_run_stats *rs,
                      struct buf_output *out)
    double usr_cpu, sys_cpu;
    unsigned long runtime;
    double io_u_dist[FIO_IO_U_MAP_NR];
    time_t time_p;
    char time_buf[32];

    if (!ddir_rw_sum(ts->io_bytes) && !ddir_rw_sum(ts->total_io_u))
        return;

    time(&time_p);
    os_ctime_r((const time_t *) &time_p, time_buf, sizeof(time_buf));

    if (!ts->error) {
        log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d: pid=%d: %s",
            ts->name, ts->groupid, ts->members,
            ts->error, (int) ts->pid, time_buf);
    } else {
        log_buf(out, "%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d: %s",
            ts->name, ts->groupid, ts->members,
            ts->error, ts->verror, (int) ts->pid,
            time_buf);
    }

    if (strlen(ts->description))
        log_buf(out, " Description : [%s]\n", ts->description);

    if (ts->io_bytes[DDIR_READ])
        show_ddir_status(rs, ts, DDIR_READ, out);
    if (ts->io_bytes[DDIR_WRITE])
        show_ddir_status(rs, ts, DDIR_WRITE, out);
    if (ts->io_bytes[DDIR_TRIM])
        show_ddir_status(rs, ts, DDIR_TRIM, out);

    show_latencies(ts, out);

    runtime = ts->total_run_time;
    if (runtime) {
        double runt = (double) runtime;

        usr_cpu = (double) ts->usr_time * 100 / runt;
        sys_cpu = (double) ts->sys_time * 100 / runt;
    } else {
        usr_cpu = 0;
        sys_cpu = 0;
    }

    log_buf(out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%llu,"
         " majf=%llu, minf=%llu\n", usr_cpu, sys_cpu,
        (unsigned long long) ts->ctx,
        (unsigned long long) ts->majf,
        (unsigned long long) ts->minf);

    stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
    log_buf(out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%,"
         " 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
        io_u_dist[1], io_u_dist[2],
        io_u_dist[3], io_u_dist[4],
        io_u_dist[5], io_u_dist[6]);
    stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
    log_buf(out, " submit : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
         " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
        io_u_dist[1], io_u_dist[2],
        io_u_dist[3], io_u_dist[4],
        io_u_dist[5], io_u_dist[6]);
    stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
    log_buf(out, " complete : 0=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%,"
         " 32=%3.1f%%, 64=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0],
        io_u_dist[1], io_u_dist[2],
        io_u_dist[3], io_u_dist[4],
        io_u_dist[5], io_u_dist[6]);
    log_buf(out, " issued : total=r=%llu/w=%llu/d=%llu,"
         " short=r=%llu/w=%llu/d=%llu,"
         " drop=r=%llu/w=%llu/d=%llu\n",
        (unsigned long long) ts->total_io_u[0],
        (unsigned long long) ts->total_io_u[1],
        (unsigned long long) ts->total_io_u[2],
        (unsigned long long) ts->short_io_u[0],
        (unsigned long long) ts->short_io_u[1],
        (unsigned long long) ts->short_io_u[2],
        (unsigned long long) ts->drop_io_u[0],
        (unsigned long long) ts->drop_io_u[1],
        (unsigned long long) ts->drop_io_u[2]);
    if (ts->continue_on_error) {
        log_buf(out, " errors : total=%llu, first_error=%d/<%s>\n",
            (unsigned long long)ts->total_err_count,
            ts->first_error,
            strerror(ts->first_error));
    }
    if (ts->latency_depth) {
        log_buf(out, " latency : target=%llu, window=%llu, percentile=%.2f%%, depth=%u\n",
            (unsigned long long)ts->latency_target,
            (unsigned long long)ts->latency_window,
            ts->latency_percentile.u.f,
            ts->latency_depth);
    }

    if (ts->nr_block_infos)
        show_block_infos(ts->nr_block_infos, ts->block_infos,
                 ts->percentile_list, out);
static void show_ddir_status_terse(struct thread_stat *ts,
                   struct group_run_stats *rs, int ddir,
                   struct buf_output *out)
    unsigned long min, max;
    unsigned long long bw, iops;
    unsigned int *ovals = NULL;
    double mean, dev;
    unsigned int len, minv, maxv;
    int i;

    assert(ddir_rw(ddir));

    if (ts->runtime[ddir]) {
        uint64_t runt = ts->runtime[ddir];

        bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024;
        iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
    }

    log_buf(out, ";%llu;%llu;%llu;%llu",
        (unsigned long long) ts->io_bytes[ddir] >> 10, bw, iops,
        (unsigned long long) ts->runtime[ddir]);

    if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
        log_buf(out, ";%lu;%lu;%f;%f", min, max, mean, dev);
    else
        log_buf(out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

    if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
        log_buf(out, ";%lu;%lu;%f;%f", min, max, mean, dev);
    else
        log_buf(out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

    if (ts->clat_percentiles) {
        len = calc_clat_percentiles(ts->io_u_plat[ddir],
                        ts->clat_stat[ddir].samples,
                        ts->percentile_list, &ovals, &maxv,
                        &minv);
    } else
        len = 0;

    for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
        if (i >= len) {
            log_buf(out, ";0%%=0");
            continue;
        }
        log_buf(out, ";%f%%=%u", ts->percentile_list[i].u.f, ovals[i]);
    }

    if (calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev))
        log_buf(out, ";%lu;%lu;%f;%f", min, max, mean, dev);
    else
        log_buf(out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);

    free(ovals);

    if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
        double p_of_agg = 100.0;

        if (rs->agg[ddir]) {
            p_of_agg = mean * 100 / (double) rs->agg[ddir];
            if (p_of_agg > 100.0)
                p_of_agg = 100.0;
        }

        log_buf(out, ";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
    } else
        log_buf(out, ";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
static void add_ddir_status_json(struct thread_stat *ts,
        struct group_run_stats *rs, int ddir, struct json_object *parent)
    unsigned long min, max;
    unsigned long long bw;
    unsigned int *ovals = NULL;
    double mean, dev, iops;
    unsigned int len, minv, maxv;
    int i;
    const char *ddirname[] = {"read", "write", "trim"};
    struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object;
    char buf[120];
    double p_of_agg = 100.0;

    assert(ddir_rw(ddir));

    if (ts->unified_rw_rep && ddir != DDIR_READ)
        return;

    dir_object = json_create_object();
    json_object_add_value_object(parent,
        ts->unified_rw_rep ? "mixed" : ddirname[ddir], dir_object);

    if (ts->runtime[ddir]) {
        uint64_t runt = ts->runtime[ddir];

        bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024;
        iops = (1000.0 * (uint64_t) ts->total_io_u[ddir]) / runt;
    }

    json_object_add_value_int(dir_object, "io_bytes", ts->io_bytes[ddir] >> 10);
    json_object_add_value_int(dir_object, "bw", bw);
    json_object_add_value_float(dir_object, "iops", iops);
    json_object_add_value_int(dir_object, "runtime", ts->runtime[ddir]);
    json_object_add_value_int(dir_object, "total_ios", ts->total_io_u[ddir]);
    json_object_add_value_int(dir_object, "short_ios", ts->short_io_u[ddir]);
    json_object_add_value_int(dir_object, "drop_ios", ts->drop_io_u[ddir]);

    if (!calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) {
        min = max = 0;
        mean = dev = 0.0;
    }
    tmp_object = json_create_object();
    json_object_add_value_object(dir_object, "slat", tmp_object);
    json_object_add_value_int(tmp_object, "min", min);
    json_object_add_value_int(tmp_object, "max", max);
    json_object_add_value_float(tmp_object, "mean", mean);
    json_object_add_value_float(tmp_object, "stddev", dev);

    if (!calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev)) {
        min = max = 0;
        mean = dev = 0.0;
    }
    tmp_object = json_create_object();
    json_object_add_value_object(dir_object, "clat", tmp_object);
    json_object_add_value_int(tmp_object, "min", min);
    json_object_add_value_int(tmp_object, "max", max);
    json_object_add_value_float(tmp_object, "mean", mean);
    json_object_add_value_float(tmp_object, "stddev", dev);
    if (ts->clat_percentiles) {
        len = calc_clat_percentiles(ts->io_u_plat[ddir],
                        ts->clat_stat[ddir].samples,
                        ts->percentile_list, &ovals, &maxv,
                        &minv);
    } else
        len = 0;

    percentile_object = json_create_object();
    json_object_add_value_object(tmp_object, "percentile", percentile_object);
    for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
        if (i >= len) {
            json_object_add_value_int(percentile_object, "0.00", 0);
            continue;
        }
        snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f);
        json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]);
    }

    if (output_format & FIO_OUTPUT_JSON_PLUS) {
        clat_bins_object = json_create_object();
        json_object_add_value_object(tmp_object, "bins", clat_bins_object);
        for (i = 0; i < FIO_IO_U_PLAT_NR; i++) {
            snprintf(buf, sizeof(buf), "%d", i);
            json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_plat[ddir][i]);
        }
        json_object_add_value_int(clat_bins_object, "FIO_IO_U_PLAT_BITS", FIO_IO_U_PLAT_BITS);
        json_object_add_value_int(clat_bins_object, "FIO_IO_U_PLAT_VAL", FIO_IO_U_PLAT_VAL);
        json_object_add_value_int(clat_bins_object, "FIO_IO_U_PLAT_NR", FIO_IO_U_PLAT_NR);
    }

    if (!calc_lat(&ts->lat_stat[ddir], &min, &max, &mean, &dev)) {
        min = max = 0;
        mean = dev = 0.0;
    }
    tmp_object = json_create_object();
    json_object_add_value_object(dir_object, "lat", tmp_object);
    json_object_add_value_int(tmp_object, "min", min);
    json_object_add_value_int(tmp_object, "max", max);
    json_object_add_value_float(tmp_object, "mean", mean);
    json_object_add_value_float(tmp_object, "stddev", dev);

    free(ovals);

    if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
        if (rs->agg[ddir]) {
            p_of_agg = mean * 100 / (double) rs->agg[ddir];
            if (p_of_agg > 100.0)
                p_of_agg = 100.0;
        }
    } else {
        min = max = 0;
        p_of_agg = mean = dev = 0.0;
    }

    json_object_add_value_int(dir_object, "bw_min", min);
    json_object_add_value_int(dir_object, "bw_max", max);
    json_object_add_value_float(dir_object, "bw_agg", p_of_agg);
    json_object_add_value_float(dir_object, "bw_mean", mean);
    json_object_add_value_float(dir_object, "bw_dev", dev);
static void show_thread_status_terse_v2(struct thread_stat *ts,
                    struct group_run_stats *rs,
                    struct buf_output *out)
    double io_u_dist[FIO_IO_U_MAP_NR];
    double io_u_lat_u[FIO_IO_U_LAT_U_NR];
    double io_u_lat_m[FIO_IO_U_LAT_M_NR];
    double usr_cpu, sys_cpu;
    int i;

    log_buf(out, "2;%s;%d;%d", ts->name, ts->groupid, ts->error);
    /* Log Read Status */
    show_ddir_status_terse(ts, rs, DDIR_READ, out);
    /* Log Write Status */
    show_ddir_status_terse(ts, rs, DDIR_WRITE, out);
    /* Log Trim Status */
    show_ddir_status_terse(ts, rs, DDIR_TRIM, out);

    /* CPU Usage */
    if (ts->total_run_time) {
        double runt = (double) ts->total_run_time;

        usr_cpu = (double) ts->usr_time * 100 / runt;
        sys_cpu = (double) ts->sys_time * 100 / runt;
    } else {
        usr_cpu = 0;
        sys_cpu = 0;
    }

    log_buf(out, ";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
        (unsigned long long) ts->ctx,
        (unsigned long long) ts->majf,
        (unsigned long long) ts->minf);

    /* Calc % distribution of IO depths, microsecond, millisecond latency */
    stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
    stat_calc_lat_u(ts, io_u_lat_u);
    stat_calc_lat_m(ts, io_u_lat_m);

    /* Only show the first 7 fixed I/O depth levels */
    log_buf(out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
        io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
        io_u_dist[4], io_u_dist[5], io_u_dist[6]);

    /* Microsecond latency */
    for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
        log_buf(out, ";%3.2f%%", io_u_lat_u[i]);
    /* Millisecond latency */
    for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
        log_buf(out, ";%3.2f%%", io_u_lat_m[i]);
    /* Additional output if continue_on_error is set - default off */
    if (ts->continue_on_error)
        log_buf(out, ";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);

    /* Additional output if description is set */
    if (strlen(ts->description))
        log_buf(out, ";%s", ts->description);
static void show_thread_status_terse_v3_v4(struct thread_stat *ts,
                       struct group_run_stats *rs, int ver,
                       struct buf_output *out)
    double io_u_dist[FIO_IO_U_MAP_NR];
    double io_u_lat_u[FIO_IO_U_LAT_U_NR];
    double io_u_lat_m[FIO_IO_U_LAT_M_NR];
    double usr_cpu, sys_cpu;
    int i;

    log_buf(out, "%d;%s;%s;%d;%d", ver, fio_version_string,
        ts->name, ts->groupid, ts->error);
    /* Log Read Status */
    show_ddir_status_terse(ts, rs, DDIR_READ, out);
    /* Log Write Status */
    show_ddir_status_terse(ts, rs, DDIR_WRITE, out);
    /* Log Trim Status */
    show_ddir_status_terse(ts, rs, DDIR_TRIM, out);

    /* CPU Usage */
    if (ts->total_run_time) {
        double runt = (double) ts->total_run_time;

        usr_cpu = (double) ts->usr_time * 100 / runt;
        sys_cpu = (double) ts->sys_time * 100 / runt;
    } else {
        usr_cpu = 0;
        sys_cpu = 0;
    }

    log_buf(out, ";%f%%;%f%%;%llu;%llu;%llu", usr_cpu, sys_cpu,
        (unsigned long long) ts->ctx,
        (unsigned long long) ts->majf,
        (unsigned long long) ts->minf);

    /* Calc % distribution of IO depths, microsecond, millisecond latency */
    stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
    stat_calc_lat_u(ts, io_u_lat_u);
    stat_calc_lat_m(ts, io_u_lat_m);

    /* Only show the first 7 fixed I/O depth levels */
    log_buf(out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%",
        io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3],
        io_u_dist[4], io_u_dist[5], io_u_dist[6]);

    /* Microsecond latency */
    for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
        log_buf(out, ";%3.2f%%", io_u_lat_u[i]);
    /* Millisecond latency */
    for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
        log_buf(out, ";%3.2f%%", io_u_lat_m[i]);

    /* disk util stats, if any */
    show_disk_util(1, NULL, out);

    /* Additional output if continue_on_error is set - default off */
    if (ts->continue_on_error)
        log_buf(out, ";%llu;%d", (unsigned long long) ts->total_err_count, ts->first_error);

    /* Additional output if description is set */
    if (strlen(ts->description))
        log_buf(out, ";%s", ts->description);
void json_add_job_opts(struct json_object *root, const char *name,
               struct flist_head *opt_list, bool num_jobs)
    struct json_object *dir_object;
    struct flist_head *entry;
    struct print_option *p;

    if (flist_empty(opt_list))
        return;

    dir_object = json_create_object();
    json_object_add_value_object(root, name, dir_object);

    flist_for_each(entry, opt_list) {
        const char *pos = "";

        p = flist_entry(entry, struct print_option, list);
        if (!num_jobs && !strcmp(p->name, "numjobs"))
            continue;
        if (p->value)
            pos = p->value;
        json_object_add_value_string(dir_object, p->name, pos);
    }
static struct json_object *show_thread_status_json(struct thread_stat *ts,
                           struct group_run_stats *rs,
                           struct flist_head *opt_list)
    struct json_object *root, *tmp;
    struct jobs_eta *je;
    double io_u_dist[FIO_IO_U_MAP_NR];
    double io_u_lat_u[FIO_IO_U_LAT_U_NR];
    double io_u_lat_m[FIO_IO_U_LAT_M_NR];
    double usr_cpu, sys_cpu;
    int i;
    size_t size;

    root = json_create_object();
    json_object_add_value_string(root, "jobname", ts->name);
    json_object_add_value_int(root, "groupid", ts->groupid);
    json_object_add_value_int(root, "error", ts->error);

    /* ETA Info */
    je = get_jobs_eta(true, &size);
    if (je) {
        json_object_add_value_int(root, "eta", je->eta_sec);
        json_object_add_value_int(root, "elapsed", je->elapsed_sec);
    }

    if (opt_list)
        json_add_job_opts(root, "job options", opt_list, true);

    add_ddir_status_json(ts, rs, DDIR_READ, root);
    add_ddir_status_json(ts, rs, DDIR_WRITE, root);
    add_ddir_status_json(ts, rs, DDIR_TRIM, root);

    /* CPU Usage */
    if (ts->total_run_time) {
        double runt = (double) ts->total_run_time;

        usr_cpu = (double) ts->usr_time * 100 / runt;
        sys_cpu = (double) ts->sys_time * 100 / runt;
    } else {
        usr_cpu = 0;
        sys_cpu = 0;
    }

    json_object_add_value_float(root, "usr_cpu", usr_cpu);
    json_object_add_value_float(root, "sys_cpu", sys_cpu);
    json_object_add_value_int(root, "ctx", ts->ctx);
    json_object_add_value_int(root, "majf", ts->majf);
    json_object_add_value_int(root, "minf", ts->minf);

    /* Calc % distribution of IO depths, microsecond, millisecond latency */
    stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
    stat_calc_lat_u(ts, io_u_lat_u);
    stat_calc_lat_m(ts, io_u_lat_m);

    tmp = json_create_object();
    json_object_add_value_object(root, "iodepth_level", tmp);
    /* Only show the first 7 fixed I/O depth levels */
    for (i = 0; i < 7; i++) {
        char name[20];

        if (i < 6)
            snprintf(name, 20, "%d", 1 << i);
        else
            snprintf(name, 20, ">=%d", 1 << i);
        json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
    }

    tmp = json_create_object();
    json_object_add_value_object(root, "latency_us", tmp);
    /* Microsecond latency */
    for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
        const char *ranges[] = { "2", "4", "10", "20", "50", "100",
                     "250", "500", "750", "1000", };
        json_object_add_value_float(tmp, ranges[i], io_u_lat_u[i]);
    }
    /* Millisecond latency */
    tmp = json_create_object();
    json_object_add_value_object(root, "latency_ms", tmp);
    for (i = 0; i < FIO_IO_U_LAT_M_NR; i++) {
        const char *ranges[] = { "2", "4", "10", "20", "50", "100",
                     "250", "500", "750", "1000", "2000",
                     ">=2000", };
        json_object_add_value_float(tmp, ranges[i], io_u_lat_m[i]);
    }

    /* Additional output if continue_on_error is set - default off */
    if (ts->continue_on_error) {
        json_object_add_value_int(root, "total_err", ts->total_err_count);
        json_object_add_value_int(root, "first_error", ts->first_error);
    }

    if (ts->latency_depth) {
        json_object_add_value_int(root, "latency_depth", ts->latency_depth);
        json_object_add_value_int(root, "latency_target", ts->latency_target);
        json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
        json_object_add_value_int(root, "latency_window", ts->latency_window);
    }

    /* Additional output if description is set */
    if (strlen(ts->description))
        json_object_add_value_string(root, "desc", ts->description);

    if (ts->nr_block_infos) {
        /* Block error histogram and types */
        int len;
        unsigned int *percentiles = NULL;
        unsigned int block_state_counts[BLOCK_STATE_COUNT];

        len = calc_block_percentiles(ts->nr_block_infos, ts->block_infos,
                         ts->percentile_list,
                         &percentiles, block_state_counts);

        if (len) {
            struct json_object *block, *percentile_object, *states;
            int state;

            block = json_create_object();
            json_object_add_value_object(root, "block", block);

            percentile_object = json_create_object();
            json_object_add_value_object(block, "percentiles",
                             percentile_object);
            for (i = 0; i < len; i++) {
                char buf[20];

                snprintf(buf, sizeof(buf), "%f",
                     ts->percentile_list[i].u.f);
                json_object_add_value_int(percentile_object,
                              (const char *)buf,
                              percentiles[i]);
            }

            states = json_create_object();
            json_object_add_value_object(block, "states", states);
            for (state = 0; state < BLOCK_STATE_COUNT; state++) {
                json_object_add_value_int(states,
                    block_state_names[state],
                    block_state_counts[state]);
            }
        }
    }

    return root;
static void show_thread_status_terse(struct thread_stat *ts,
                     struct group_run_stats *rs,
                     struct buf_output *out)
    if (terse_version == 2)
        show_thread_status_terse_v2(ts, rs, out);
    else if (terse_version == 3 || terse_version == 4)
        show_thread_status_terse_v3_v4(ts, rs, terse_version, out);
    else
        log_err("fio: bad terse version!? %d\n", terse_version);

struct json_object *show_thread_status(struct thread_stat *ts,
                       struct group_run_stats *rs,
                       struct flist_head *opt_list,
                       struct buf_output *out)
    struct json_object *ret = NULL;

    if (output_format & FIO_OUTPUT_TERSE)
        show_thread_status_terse(ts, rs, out);
    if (output_format & FIO_OUTPUT_JSON)
        ret = show_thread_status_json(ts, rs, opt_list);
    if (output_format & FIO_OUTPUT_NORMAL)
        show_thread_status_normal(ts, rs, out);

    return ret;
static void sum_stat(struct io_stat *dst, struct io_stat *src, bool first)
{
    double mean, S;

    if (src->samples == 0)
        return;

    dst->min_val = min(dst->min_val, src->min_val);
    dst->max_val = max(dst->max_val, src->max_val);

    /*
     * Compute new mean and S after the merge
     * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
     * #Parallel_algorithm>
     */
    if (first) {
        mean = src->mean.u.f;
        S = src->S.u.f;
    } else {
        double delta = src->mean.u.f - dst->mean.u.f;

        mean = ((src->mean.u.f * src->samples) +
            (dst->mean.u.f * dst->samples)) /
            (dst->samples + src->samples);

        S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
            (dst->samples * src->samples) /
            (dst->samples + src->samples);
    }

    dst->samples += src->samples;
    dst->mean.u.f = mean;
    dst->S.u.f = S;
}
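
/*
 * Numerical check of the merge, with invented sample sets: merging
 * {samples=2, mean=10, S=2} (e.g. 9 and 11) into
 * {samples=2, mean=20, S=2} (e.g. 19 and 21) gives delta = 10 and
 *
 *	mean = (10 * 2 + 20 * 2) / 4 = 15
 *	S    = 2 + 2 + 10^2 * (2 * 2) / 4 = 104
 *
 * which matches the pooled set {9, 11, 19, 21}: mean 15, squared
 * deviations 36 + 16 + 16 + 36 = 104.
 */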
void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
    int i;

    for (i = 0; i < DDIR_RWDIR_CNT; i++) {
        if (dst->max_run[i] < src->max_run[i])
            dst->max_run[i] = src->max_run[i];
        if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
            dst->min_run[i] = src->min_run[i];
        if (dst->max_bw[i] < src->max_bw[i])
            dst->max_bw[i] = src->max_bw[i];
        if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
            dst->min_bw[i] = src->min_bw[i];

        dst->io_kb[i] += src->io_kb[i];
        dst->agg[i] += src->agg[i];
    }

    if (!dst->kb_base)
        dst->kb_base = src->kb_base;
    if (!dst->unit_base)
        dst->unit_base = src->unit_base;
void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src,
              bool first)
    int l, k;

    for (l = 0; l < DDIR_RWDIR_CNT; l++) {
        if (!dst->unified_rw_rep) {
            sum_stat(&dst->clat_stat[l], &src->clat_stat[l], first);
            sum_stat(&dst->slat_stat[l], &src->slat_stat[l], first);
            sum_stat(&dst->lat_stat[l], &src->lat_stat[l], first);
            sum_stat(&dst->bw_stat[l], &src->bw_stat[l], first);

            dst->io_bytes[l] += src->io_bytes[l];

            if (dst->runtime[l] < src->runtime[l])
                dst->runtime[l] = src->runtime[l];
        } else {
            sum_stat(&dst->clat_stat[0], &src->clat_stat[l], first);
            sum_stat(&dst->slat_stat[0], &src->slat_stat[l], first);
            sum_stat(&dst->lat_stat[0], &src->lat_stat[l], first);
            sum_stat(&dst->bw_stat[0], &src->bw_stat[l], first);

            dst->io_bytes[0] += src->io_bytes[l];

            if (dst->runtime[0] < src->runtime[l])
                dst->runtime[0] = src->runtime[l];
        }

        /*
         * We're summing to the same destination, so override
         * 'first' after the first iteration of the loop
         */
        first = false;
    }

    dst->usr_time += src->usr_time;
    dst->sys_time += src->sys_time;
    dst->ctx += src->ctx;
    dst->majf += src->majf;
    dst->minf += src->minf;

    for (k = 0; k < FIO_IO_U_MAP_NR; k++)
        dst->io_u_map[k] += src->io_u_map[k];
    for (k = 0; k < FIO_IO_U_MAP_NR; k++)
        dst->io_u_submit[k] += src->io_u_submit[k];
    for (k = 0; k < FIO_IO_U_MAP_NR; k++)
        dst->io_u_complete[k] += src->io_u_complete[k];
    for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
        dst->io_u_lat_u[k] += src->io_u_lat_u[k];
    for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
        dst->io_u_lat_m[k] += src->io_u_lat_m[k];

    for (k = 0; k < DDIR_RWDIR_CNT; k++) {
        if (!dst->unified_rw_rep) {
            dst->total_io_u[k] += src->total_io_u[k];
            dst->short_io_u[k] += src->short_io_u[k];
            dst->drop_io_u[k] += src->drop_io_u[k];
        } else {
            dst->total_io_u[0] += src->total_io_u[k];
            dst->short_io_u[0] += src->short_io_u[k];
            dst->drop_io_u[0] += src->drop_io_u[k];
        }
    }

    for (k = 0; k < DDIR_RWDIR_CNT; k++) {
        int m;

        for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
            if (!dst->unified_rw_rep)
                dst->io_u_plat[k][m] += src->io_u_plat[k][m];
            else
                dst->io_u_plat[0][m] += src->io_u_plat[k][m];
        }
    }

    dst->total_run_time += src->total_run_time;
    dst->total_submit += src->total_submit;
    dst->total_complete += src->total_complete;
void init_group_run_stat(struct group_run_stats *gs)
{
    int i;

    memset(gs, 0, sizeof(*gs));
    for (i = 0; i < DDIR_RWDIR_CNT; i++)
        gs->min_bw[i] = gs->min_run[i] = ~0UL;
}

void init_thread_stat(struct thread_stat *ts)
{
    int j;

    memset(ts, 0, sizeof(*ts));

    for (j = 0; j < DDIR_RWDIR_CNT; j++) {
        ts->lat_stat[j].min_val = -1UL;
        ts->clat_stat[j].min_val = -1UL;
        ts->slat_stat[j].min_val = -1UL;
        ts->bw_stat[j].min_val = -1UL;
    }
    ts->groupid = -1;
}
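
/*
 * min_val is seeded with all ones (-1UL == ULONG_MAX) so that the
 * first real sample always wins the "data < is->min_val" comparison
 * in add_stat_sample(); max_val starts at 0 for the same reason.
 */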
void __show_run_stats(void)
    struct group_run_stats *runstats, *rs;
    struct thread_data *td;
    struct thread_stat *threadstats, *ts;
    int i, j, k, nr_ts, last_ts, idx;
    int kb_base_warned = 0;
    int unit_base_warned = 0;
    struct json_object *root = NULL;
    struct json_array *array = NULL;
    struct buf_output output[FIO_OUTPUT_NR];
    struct flist_head **opt_lists;

    runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));

    for (i = 0; i < groupid + 1; i++)
        init_group_run_stat(&runstats[i]);

    /*
     * Find out how many thread stats we need. If group reporting isn't
     * enabled, it's one per td.
     */
    nr_ts = 0;
    last_ts = -1;
    for_each_td(td, i) {
        if (!td->o.group_reporting) {
            nr_ts++;
            continue;
        }
        if (last_ts == td->groupid)
            continue;
        last_ts = td->groupid;
        nr_ts++;
    }

    threadstats = malloc(nr_ts * sizeof(struct thread_stat));
    opt_lists = malloc(nr_ts * sizeof(struct flist_head *));

    for (i = 0; i < nr_ts; i++) {
        init_thread_stat(&threadstats[i]);
        opt_lists[i] = NULL;
    }
    j = 0;
    last_ts = -1;
    idx = 0;
    for_each_td(td, i) {
        if (idx && (!td->o.group_reporting ||
            (td->o.group_reporting && last_ts != td->groupid))) {
            idx = 0;
            j++;
        }

        last_ts = td->groupid;

        ts = &threadstats[j];

        ts->clat_percentiles = td->o.clat_percentiles;
        ts->percentile_precision = td->o.percentile_precision;
        memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list));
        opt_lists[j] = &td->opt_list;

        idx++;
        ts->members++;

        if (ts->groupid == -1) {
            /*
             * These are per-group shared already
             */
            strncpy(ts->name, td->o.name, FIO_JOBNAME_SIZE - 1);
            if (td->o.description)
                strncpy(ts->description, td->o.description,
                    FIO_JOBDESC_SIZE - 1);
            else
                memset(ts->description, 0, FIO_JOBDESC_SIZE);

            /*
             * If multiple entries in this group, this is
             * the first member.
             */
            ts->thread_number = td->thread_number;
            ts->groupid = td->groupid;

            /*
             * first pid in group, not very useful...
             */
            ts->pid = td->pid;

            ts->kb_base = td->o.kb_base;
            ts->unit_base = td->o.unit_base;
            ts->unified_rw_rep = td->o.unified_rw_rep;
        } else if (ts->kb_base != td->o.kb_base && !kb_base_warned) {
            log_info("fio: kb_base differs for jobs in group, using"
                 " %u as the base\n", ts->kb_base);
            kb_base_warned = 1;
        } else if (ts->unit_base != td->o.unit_base && !unit_base_warned) {
            log_info("fio: unit_base differs for jobs in group, using"
                 " %u as the base\n", ts->unit_base);
            unit_base_warned = 1;
        }

        ts->continue_on_error = td->o.continue_on_error;
        ts->total_err_count += td->total_err_count;
        ts->first_error = td->first_error;

        if (!td->error && td->o.continue_on_error &&
            td->first_error) {
            ts->error = td->first_error;
            ts->verror[sizeof(ts->verror) - 1] = '\0';
            strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
        } else if (td->error) {
            ts->error = td->error;
            ts->verror[sizeof(ts->verror) - 1] = '\0';
            strncpy(ts->verror, td->verror, sizeof(ts->verror) - 1);
        }

        ts->latency_depth = td->latency_qd;
        ts->latency_target = td->o.latency_target;
        ts->latency_percentile = td->o.latency_percentile;
        ts->latency_window = td->o.latency_window;

        ts->nr_block_infos = td->ts.nr_block_infos;
        for (k = 0; k < ts->nr_block_infos; k++)
            ts->block_infos[k] = td->ts.block_infos[k];

        sum_thread_stats(ts, &td->ts, idx == 1);
    }
    for (i = 0; i < nr_ts; i++) {
        unsigned long long bw;

        ts = &threadstats[i];
        if (ts->groupid == -1)
            continue;
        rs = &runstats[ts->groupid];
        rs->kb_base = ts->kb_base;
        rs->unit_base = ts->unit_base;
        rs->unified_rw_rep += ts->unified_rw_rep;

        for (j = 0; j < DDIR_RWDIR_CNT; j++) {
            if (!ts->runtime[j])
                continue;
            if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
                rs->min_run[j] = ts->runtime[j];
            if (ts->runtime[j] > rs->max_run[j])
                rs->max_run[j] = ts->runtime[j];

            bw = 0;
            if (ts->runtime[j]) {
                unsigned long runt = ts->runtime[j];
                unsigned long long kb;

                kb = ts->io_bytes[j] / rs->kb_base;
                bw = kb * 1000 / runt;
            }
            if (bw < rs->min_bw[j])
                rs->min_bw[j] = bw;
            if (bw > rs->max_bw[j])
                rs->max_bw[j] = bw;

            rs->io_kb[j] += ts->io_bytes[j] / rs->kb_base;
        }
    }
    for (i = 0; i < groupid + 1; i++) {
        int ddir;

        rs = &runstats[i];

        for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
            if (rs->max_run[ddir])
                rs->agg[ddir] = (rs->io_kb[ddir] * 1000) /
                        rs->max_run[ddir];
        }
    }

    for (i = 0; i < FIO_OUTPUT_NR; i++)
        buf_output_init(&output[i]);

    /*
     * don't overwrite last signal output
     */
    if (output_format & FIO_OUTPUT_NORMAL)
        log_buf(&output[__FIO_OUTPUT_NORMAL], "\n");
    if (output_format & FIO_OUTPUT_JSON) {
        struct thread_data *global;
        struct timeval now;
        char time_buf[32];
        unsigned long long ms_since_epoch;

        gettimeofday(&now, NULL);
        ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
                 (unsigned long long)(now.tv_usec) / 1000;

        os_ctime_r((const time_t *) &now.tv_sec, time_buf,
               sizeof(time_buf));
        time_buf[strlen(time_buf) - 1] = '\0';

        root = json_create_object();
        json_object_add_value_string(root, "fio version", fio_version_string);
        json_object_add_value_int(root, "timestamp", now.tv_sec);
        json_object_add_value_int(root, "timestamp_ms", ms_since_epoch);
        json_object_add_value_string(root, "time", time_buf);
        global = get_global_options();
        json_add_job_opts(root, "global options", &global->opt_list, false);
        array = json_create_array();
        json_object_add_value_array(root, "jobs", array);
    }
    if (is_backend)
        fio_server_send_job_options(&get_global_options()->opt_list, -1U);

    for (i = 0; i < nr_ts; i++) {
        ts = &threadstats[i];
        rs = &runstats[ts->groupid];

        if (is_backend) {
            fio_server_send_job_options(opt_lists[i], i);
            fio_server_send_ts(ts, rs);
        } else {
            if (output_format & FIO_OUTPUT_TERSE)
                show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]);
            if (output_format & FIO_OUTPUT_JSON) {
                struct json_object *tmp = show_thread_status_json(ts, rs, opt_lists[i]);

                json_array_add_value_object(array, tmp);
            }
            if (output_format & FIO_OUTPUT_NORMAL)
                show_thread_status_normal(ts, rs, &output[__FIO_OUTPUT_NORMAL]);
        }
    }

    if (!is_backend && (output_format & FIO_OUTPUT_JSON)) {
        /* disk util stats, if any */
        show_disk_util(1, root, &output[__FIO_OUTPUT_JSON]);

        show_idle_prof_stats(FIO_OUTPUT_JSON, root, &output[__FIO_OUTPUT_JSON]);

        json_print_object(root, &output[__FIO_OUTPUT_JSON]);
        log_buf(&output[__FIO_OUTPUT_JSON], "\n");
        json_free_object(root);
    }

    for (i = 0; i < groupid + 1; i++) {
        rs = &runstats[i];

        if (is_backend)
            fio_server_send_gs(rs);
        else if (output_format & FIO_OUTPUT_NORMAL)
            show_group_stats(rs, &output[__FIO_OUTPUT_NORMAL]);
    }

    if (is_backend)
        fio_server_send_du();
    else if (output_format & FIO_OUTPUT_NORMAL) {
        show_disk_util(0, NULL, &output[__FIO_OUTPUT_NORMAL]);
        show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL, &output[__FIO_OUTPUT_NORMAL]);
    }

    for (i = 0; i < FIO_OUTPUT_NR; i++) {
        buf_output_flush(&output[i]);
        buf_output_free(&output[i]);
    }
void show_run_stats(void)
{
    fio_mutex_down(stat_mutex);
    __show_run_stats();
    fio_mutex_up(stat_mutex);
}

void __show_running_run_stats(void)
    struct thread_data *td;
    unsigned long long *rt;
    struct timeval tv;
    int i;

    fio_mutex_down(stat_mutex);

    rt = malloc(thread_number * sizeof(unsigned long long));
    fio_gettime(&tv, NULL);

    for_each_td(td, i) {
        td->update_rusage = 1;
        td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
        td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
        td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
        td->ts.total_run_time = mtime_since(&td->epoch, &tv);

        rt[i] = mtime_since(&td->start, &tv);
        if (td_read(td) && td->ts.io_bytes[DDIR_READ])
            td->ts.runtime[DDIR_READ] += rt[i];
        if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
            td->ts.runtime[DDIR_WRITE] += rt[i];
        if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
            td->ts.runtime[DDIR_TRIM] += rt[i];
    }

    for_each_td(td, i) {
        if (td->runstate >= TD_EXITED)
            continue;
        if (td->rusage_sem) {
            td->update_rusage = 1;
            fio_mutex_down(td->rusage_sem);
        }
        td->update_rusage = 0;
    }

    __show_run_stats();

    for_each_td(td, i) {
        if (td_read(td) && td->ts.io_bytes[DDIR_READ])
            td->ts.runtime[DDIR_READ] -= rt[i];
        if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
            td->ts.runtime[DDIR_WRITE] -= rt[i];
        if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
            td->ts.runtime[DDIR_TRIM] -= rt[i];
    }

    free(rt);
    fio_mutex_up(stat_mutex);
static int status_interval_init;
static struct timeval status_time;
static int status_file_disabled;

#define FIO_STATUS_FILE "fio-dump-status"

static int check_status_file(void)
    struct stat sb;
    const char *temp_dir;
    char fio_status_file_path[PATH_MAX];

    if (status_file_disabled)
        return 0;

    temp_dir = getenv("TMPDIR");
    if (temp_dir == NULL) {
        temp_dir = getenv("TEMP");
        if (temp_dir && strlen(temp_dir) >= PATH_MAX)
            temp_dir = NULL;
    }
    if (temp_dir == NULL)
        temp_dir = "/tmp";

    snprintf(fio_status_file_path, sizeof(fio_status_file_path), "%s/%s", temp_dir, FIO_STATUS_FILE);

    if (stat(fio_status_file_path, &sb))
        return 0;

    if (unlink(fio_status_file_path) < 0) {
        log_err("fio: failed to unlink %s: %s\n", fio_status_file_path,
            strerror(errno));
        log_err("fio: disabling status file updates\n");
        status_file_disabled = 1;
    }

    return 1;
void check_for_running_stats(void)
    if (status_interval) {
        if (!status_interval_init) {
            fio_gettime(&status_time, NULL);
            status_interval_init = 1;
        } else if (mtime_since_now(&status_time) >= status_interval) {
            show_running_run_stats();
            fio_gettime(&status_time, NULL);
            return;
        }
    }
    if (check_status_file()) {
        show_running_run_stats();
        return;
    }

static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
    double val = (double) data;
    double delta;

    if (data > is->max_val)
        is->max_val = data;
    if (data < is->min_val)
        is->min_val = data;

    delta = val - is->mean.u.f;
    if (delta) {
        is->mean.u.f += delta / (is->samples + 1.0);
        is->S.u.f += delta * (val - is->mean.u.f);
    }

    is->samples++;
}
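
/*
 * The update above is Welford's online algorithm: with val as the new
 * sample and n samples seen so far,
 *
 *	mean' = mean + (val - mean) / (n + 1)
 *	S'    = S + (val - mean) * (val - mean')
 *
 * so the mean and the sum of squared deviations stay current after
 * every sample without keeping any history around.
 */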
/*
 * Return a struct io_logs, which is added to the tail of the log
 * list for 'iolog'.
 */
static struct io_logs *get_new_log(struct io_log *iolog)
    size_t new_size, new_samples;
    struct io_logs *cur_log;

    /*
     * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling
     * the allocation size indefinitely
     */
    if (!iolog->cur_log_max)
        new_samples = DEF_LOG_ENTRIES;
    else {
        new_samples = iolog->cur_log_max * 2;
        if (new_samples > MAX_LOG_ENTRIES)
            new_samples = MAX_LOG_ENTRIES;
    }

    new_size = new_samples * log_entry_sz(iolog);

    cur_log = smalloc(sizeof(*cur_log));
    if (cur_log) {
        INIT_FLIST_HEAD(&cur_log->list);
        cur_log->log = malloc(new_size);
        if (cur_log->log) {
            cur_log->nr_samples = 0;
            cur_log->max_samples = new_samples;
            flist_add_tail(&cur_log->list, &iolog->io_logs);
            iolog->cur_log_max = new_samples;
            return cur_log;
        }
    }

    return NULL;
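
/*
 * Growth sketch: chunk sizes double from DEF_LOG_ENTRIES up to the
 * MAX_LOG_ENTRIES cap, so a run that logs n samples performs O(log n)
 * allocations instead of reallocating on every sample.
 */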
/*
 * Add and return a new log chunk, or return current log if big enough
 */
static struct io_logs *regrow_log(struct io_log *iolog)
    struct io_logs *cur_log;
    int i;

    if (!iolog || iolog->disabled)
        goto disable;

    cur_log = iolog_cur_log(iolog);
    if (!cur_log) {
        cur_log = get_new_log(iolog);
        if (!cur_log)
            return NULL;
    }

    if (cur_log->nr_samples < cur_log->max_samples)
        return cur_log;

    /*
     * No room for a new sample. If we're compressing on the fly, flush
     * out the current chunk
     */
    if (iolog->log_gz) {
        if (iolog_cur_flush(iolog, cur_log)) {
            log_err("fio: failed flushing iolog! Will stop logging.\n");
            return NULL;
        }
    }

    /*
     * Get a new log array, and add to our list
     */
    cur_log = get_new_log(iolog);
    if (!cur_log) {
        log_err("fio: failed extending iolog! Will stop logging.\n");
        return NULL;
    }

    if (!iolog->pending || !iolog->pending->nr_samples)
        return cur_log;

    /*
     * Flush pending items to new log
     */
    for (i = 0; i < iolog->pending->nr_samples; i++) {
        struct io_sample *src, *dst;

        src = get_sample(iolog, iolog->pending, i);
        dst = get_sample(iolog, cur_log, i);
        memcpy(dst, src, log_entry_sz(iolog));
    }

    iolog->pending->nr_samples = 0;
    return cur_log;

disable:
    if (iolog)
        iolog->disabled = true;
    return NULL;
void regrow_logs(struct thread_data *td)
{
    regrow_log(td->slat_log);
    regrow_log(td->clat_log);
    regrow_log(td->lat_log);
    regrow_log(td->bw_log);
    regrow_log(td->iops_log);
    td->flags &= ~TD_F_REGROW_LOGS;
}

static struct io_logs *get_cur_log(struct io_log *iolog)
    struct io_logs *cur_log;

    cur_log = iolog_cur_log(iolog);
    if (!cur_log) {
        cur_log = get_new_log(iolog);
        if (!cur_log)
            return NULL;
    }

    if (cur_log->nr_samples < cur_log->max_samples)
        return cur_log;

    /*
     * Out of space. If we're in IO offload mode, or we're not doing
     * per unit logging (hence logging happens outside of the IO thread
     * as well), add a new log chunk inline. If we're doing inline
     * submissions, flag 'td' as needing a log regrow and we'll take
     * care of it on the submission side.
     */
    if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD ||
        !per_unit_log(iolog))
        return regrow_log(iolog);

    iolog->td->flags |= TD_F_REGROW_LOGS;
    assert(iolog->pending->nr_samples < iolog->pending->max_samples);
    return iolog->pending;
static void __add_log_sample(struct io_log *iolog, unsigned long val,
                 enum fio_ddir ddir, unsigned int bs,
                 unsigned long t, uint64_t offset)
    struct io_logs *cur_log;

    if (iolog->disabled)
        return;
    if (flist_empty(&iolog->io_logs))
        iolog->avg_last = t;

    cur_log = get_cur_log(iolog);
    if (cur_log) {
        struct io_sample *s;

        s = get_sample(iolog, cur_log, cur_log->nr_samples);

        s->val = val;
        s->time = t;
        io_sample_set_ddir(iolog, s, ddir);
        s->bs = bs;

        if (iolog->log_offset) {
            struct io_sample_offset *so = (void *) s;

            so->offset = offset;
        }

        cur_log->nr_samples++;
        return;
    }

    iolog->disabled = true;

static inline void reset_io_stat(struct io_stat *ios)
{
    ios->max_val = ios->min_val = ios->samples = 0;
    ios->mean.u.f = ios->S.u.f = 0;
}
void reset_io_stats(struct thread_data *td)
    struct thread_stat *ts = &td->ts;
    int i, j;

    for (i = 0; i < DDIR_RWDIR_CNT; i++) {
        reset_io_stat(&ts->clat_stat[i]);
        reset_io_stat(&ts->slat_stat[i]);
        reset_io_stat(&ts->lat_stat[i]);
        reset_io_stat(&ts->bw_stat[i]);
        reset_io_stat(&ts->iops_stat[i]);

        ts->io_bytes[i] = 0;

        for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
            ts->io_u_plat[i][j] = 0;
    }

    for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
        ts->io_u_map[i] = 0;
        ts->io_u_submit[i] = 0;
        ts->io_u_complete[i] = 0;
    }
    /*
     * The latency arrays are larger than FIO_IO_U_MAP_NR, so clear
     * them over their full length
     */
    for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
        ts->io_u_lat_u[i] = 0;
    for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
        ts->io_u_lat_m[i] = 0;

    ts->total_submit = 0;
    ts->total_complete = 0;

    for (i = 0; i < 3; i++) {
        ts->total_io_u[i] = 0;
        ts->short_io_u[i] = 0;
        ts->drop_io_u[i] = 0;
    }
static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
                  unsigned long elapsed, bool log_max)
    /*
     * Note an entry in the log. Use the mean from the logged samples,
     * making sure to properly round up. Only write a log entry if we
     * had actual samples done.
     */
    if (iolog->avg_window[ddir].samples) {
        unsigned long val;

        if (log_max)
            val = iolog->avg_window[ddir].max_val;
        else
            val = iolog->avg_window[ddir].mean.u.f + 0.50;

        __add_log_sample(iolog, val, ddir, 0, elapsed, 0);
    }

    reset_io_stat(&iolog->avg_window[ddir]);

static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
                 bool log_max)
    int ddir;

    for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
        __add_stat_to_log(iolog, ddir, elapsed, log_max);
static void add_log_sample(struct thread_data *td, struct io_log *iolog,
               unsigned long val, enum fio_ddir ddir,
               unsigned int bs, uint64_t offset)
    unsigned long elapsed, this_window;

    if (!ddir_rw(ddir))
        return;

    elapsed = mtime_since_now(&td->epoch);

    /*
     * If no time averaging, just add the log sample.
     */
    if (!iolog->avg_msec) {
        __add_log_sample(iolog, val, ddir, bs, elapsed, offset);
        return;
    }

    /*
     * Add the sample. If the time period has passed, then
     * add that entry to the log and clear.
     */
    add_stat_sample(&iolog->avg_window[ddir], val);

    /*
     * If the period hasn't passed, adding the above sample is all we
     * need to do.
     */
    this_window = elapsed - iolog->avg_last;
    if (this_window < iolog->avg_msec)
        return;

    _add_stat_to_log(iolog, elapsed, td->o.log_max != 0);

    iolog->avg_last = elapsed;
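
/*
 * Timeline sketch, with invented numbers: with avg_msec = 500, samples
 * arriving at t = 100, 300 and 700 are all folded into avg_window by
 * add_stat_sample(). Only at t = 700, when elapsed - avg_last >= 500,
 * is a single averaged (or max, if log_max is set) entry written out
 * and the window reset.
 */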
void finalize_logs(struct thread_data *td, bool unit_logs)
    unsigned long elapsed;

    elapsed = mtime_since_now(&td->epoch);

    if (td->clat_log && unit_logs)
        _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0);
    if (td->slat_log && unit_logs)
        _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0);
    if (td->lat_log && unit_logs)
        _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0);
    if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
        _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0);
    if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
        _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);

void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs)
    struct io_log *iolog;

    if (!ddir_rw(ddir))
        return;

    iolog = agg_io_log[ddir];
    __add_log_sample(iolog, val, ddir, bs, mtime_since_genesis(), 0);
static void add_clat_percentile_sample(struct thread_stat *ts,
                       unsigned long usec, enum fio_ddir ddir)
{
    unsigned int idx = plat_val_to_idx(usec);

    assert(idx < FIO_IO_U_PLAT_NR);

    ts->io_u_plat[ddir][idx]++;
}

void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
             unsigned long usec, unsigned int bs, uint64_t offset)
    struct thread_stat *ts = &td->ts;

    if (!ddir_rw(ddir))
        return;

    add_stat_sample(&ts->clat_stat[ddir], usec);

    if (td->clat_log)
        add_log_sample(td, td->clat_log, usec, ddir, bs, offset);

    if (ts->clat_percentiles)
        add_clat_percentile_sample(ts, usec, ddir);
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
             unsigned long usec, unsigned int bs, uint64_t offset)
    struct thread_stat *ts = &td->ts;

    if (!ddir_rw(ddir))
        return;

    add_stat_sample(&ts->slat_stat[ddir], usec);

    if (td->slat_log)
        add_log_sample(td, td->slat_log, usec, ddir, bs, offset);

void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
            unsigned long usec, unsigned int bs, uint64_t offset)
    struct thread_stat *ts = &td->ts;

    if (!ddir_rw(ddir))
        return;

    add_stat_sample(&ts->lat_stat[ddir], usec);

    if (td->lat_log)
        add_log_sample(td, td->lat_log, usec, ddir, bs, offset);

void add_bw_sample(struct thread_data *td, struct io_u *io_u,
           unsigned int bytes, unsigned long spent)
    struct thread_stat *ts = &td->ts;
    unsigned long rate;

    if (spent)
        rate = bytes * 1000 / spent;
    else
        rate = 0;

    add_stat_sample(&ts->bw_stat[io_u->ddir], rate);

    if (td->bw_log)
        add_log_sample(td, td->bw_log, rate, io_u->ddir, bytes, io_u->offset);

    td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir];
static int add_bw_samples(struct thread_data *td, struct timeval *t)
    struct thread_stat *ts = &td->ts;
    unsigned long spent, rate;
    int ddir;

    spent = mtime_since(&td->bw_sample_time, t);
    if (spent < td->o.bw_avg_time &&
        td->o.bw_avg_time - spent >= 10)
        return td->o.bw_avg_time - spent;

    /*
     * Compute the rate for each data direction over the interval.
     */
    for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
        uint64_t delta;

        delta = td->this_io_bytes[ddir] - td->stat_io_bytes[ddir];
        if (!delta)
            continue; /* No entries for interval */

        if (spent)
            rate = delta * 1000 / spent / 1024;
        else
            rate = 0;

        add_stat_sample(&ts->bw_stat[ddir], rate);

        if (td->bw_log) {
            unsigned int bs = 0;

            if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
                bs = td->o.min_bs[ddir];

            add_log_sample(td, td->bw_log, rate, ddir, bs, 0);
        }

        td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
    }

    timeval_add_msec(&td->bw_sample_time, td->o.bw_avg_time);

    if (spent <= td->o.bw_avg_time)
        return td->o.bw_avg_time;

    return td->o.bw_avg_time - (1 + spent - td->o.bw_avg_time);
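
/*
 * Wakeup arithmetic, with invented numbers: with bw_avg_time = 500 and
 * spent = 507, the return value is 500 - (1 + 507 - 500) = 492, which
 * nudges the next poll back toward the 500 msec grid instead of
 * letting the 7 msec of oversleep accumulate.
 */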
void add_iops_sample(struct thread_data *td, struct io_u *io_u,
             unsigned int bytes)
    struct thread_stat *ts = &td->ts;

    if (!ddir_rw(io_u->ddir))
        return;

    add_stat_sample(&ts->iops_stat[io_u->ddir], 1);

    if (td->iops_log)
        add_log_sample(td, td->iops_log, 1, io_u->ddir, bytes, io_u->offset);

    td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];

static int add_iops_samples(struct thread_data *td, struct timeval *t)
    struct thread_stat *ts = &td->ts;
    unsigned long spent, iops;
    int ddir;

    spent = mtime_since(&td->iops_sample_time, t);
    if (spent < td->o.iops_avg_time &&
        td->o.iops_avg_time - spent >= 10)
        return td->o.iops_avg_time - spent;

    /*
     * Compute the rate for each data direction over the interval.
     */
    for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
        uint64_t delta;

        delta = td->this_io_blocks[ddir] - td->stat_io_blocks[ddir];
        if (!delta)
            continue; /* No entries for interval */

        if (spent)
            iops = (delta * 1000) / spent;
        else
            iops = 0;

        add_stat_sample(&ts->iops_stat[ddir], iops);

        if (td->iops_log) {
            unsigned int bs = 0;

            if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
                bs = td->o.min_bs[ddir];

            add_log_sample(td, td->iops_log, iops, ddir, bs, 0);
        }

        td->stat_io_blocks[ddir] = td->this_io_blocks[ddir];
    }

    timeval_add_msec(&td->iops_sample_time, td->o.iops_avg_time);

    if (spent <= td->o.iops_avg_time)
        return td->o.iops_avg_time;

    return td->o.iops_avg_time - (1 + spent - td->o.iops_avg_time);
/*
 * Returns msecs to next event
 */
int calc_log_samples(void)
    struct thread_data *td;
    unsigned int next = ~0U, tmp;
    struct timeval now;
    int i;

    fio_gettime(&now, NULL);

    for_each_td(td, i) {
        if (in_ramp_time(td) ||
            !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
            next = min(td->o.iops_avg_time, td->o.bw_avg_time);
            continue;
        }
        if (!per_unit_log(td->bw_log)) {
            tmp = add_bw_samples(td, &now);
            if (tmp < next)
                next = tmp;
        }
        if (!per_unit_log(td->iops_log)) {
            tmp = add_iops_samples(td, &now);
            if (tmp < next)
                next = tmp;
        }
    }

    return next == ~0U ? 0 : next;

void stat_init(void)
{
    stat_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
}

void stat_exit(void)
{
    /*
     * When we have the mutex, we know out-of-band access to it
     * is done.
     */
    fio_mutex_down(stat_mutex);
    fio_mutex_remove(stat_mutex);
}
/*
 * Called from signal handler. Wake up status thread.
 */
void show_running_run_stats(void)
{
    helper_do_stat();
}

uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u)
{
    /*
     * Ignore io_u's which span multiple blocks; they will just get
     * inaccurate counts.
     */
    int idx = (io_u->offset - io_u->file->file_offset)
            / td->o.bs[DDIR_TRIM];
    uint32_t *info = &td->ts.block_infos[idx];

    assert(idx < td->ts.nr_block_infos);
    return info;
}