+ tmp = json_create_object();
+ json_object_add_value_object(root, "latency_us", tmp);
+ /* Microsecond latency */
+ for (i = 0; i < FIO_IO_U_LAT_U_NR; i++) {
+ const char *ranges[] = { "2", "4", "10", "20", "50", "100",
+ "250", "500", "750", "1000", };
+ json_object_add_value_float(tmp, ranges[i], io_u_lat_u[i]);
+ }
+ /* Millisecond latency */
+ tmp = json_create_object();
+ json_object_add_value_object(root, "latency_ms", tmp);
+ for (i = 0; i < FIO_IO_U_LAT_M_NR; i++) {
+ const char *ranges[] = { "2", "4", "10", "20", "50", "100",
+ "250", "500", "750", "1000", "2000",
+ ">=2000", };
+ json_object_add_value_float(tmp, ranges[i], io_u_lat_m[i]);
+ }
+
+ /* Additional output if continue_on_error set - default off*/
+ if (ts->continue_on_error) {
+ json_object_add_value_int(root, "total_err", ts->total_err_count);
+ json_object_add_value_int(root, "first_error", ts->first_error);
+ }
+
+ if (ts->latency_depth) {
+ json_object_add_value_int(root, "latency_depth", ts->latency_depth);
+ json_object_add_value_int(root, "latency_target", ts->latency_target);
+ json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
+ json_object_add_value_int(root, "latency_window", ts->latency_window);
+ }
+
+ /* Additional output if description is set */
+ if (strlen(ts->description))
+ json_object_add_value_string(root, "desc", ts->description);
+
+ return root;
+}
+
+/*
+ * Dispatch terse-format reporting for one thread's stats to the handler
+ * matching the currently selected terse_version (a file-scope setting).
+ * Only versions 2, 3 and 4 are supported; anything else is logged as an
+ * error and produces no output.
+ */
+static void show_thread_status_terse(struct thread_stat *ts,
+ struct group_run_stats *rs)
+{
+ if (terse_version == 2)
+ show_thread_status_terse_v2(ts, rs);
+ else if (terse_version == 3 || terse_version == 4)
+ show_thread_status_terse_v3_v4(ts, rs, terse_version);
+ else
+ log_err("fio: bad terse version!? %d\n", terse_version);
+}
+
+/*
+ * Emit one thread's status in the globally selected output format.
+ *
+ * Returns the freshly built JSON object when output_format is
+ * FIO_OUTPUT_JSON (ownership passes to the caller); for terse and
+ * normal output the report is written directly and NULL is returned.
+ */
+struct json_object *show_thread_status(struct thread_stat *ts,
+ struct group_run_stats *rs)
+{
+ if (output_format == FIO_OUTPUT_TERSE)
+ show_thread_status_terse(ts, rs);
+ else if (output_format == FIO_OUTPUT_JSON)
+ return(show_thread_status_json(ts, rs));
+ else
+ show_thread_status_normal(ts, rs);
+ return NULL;
+}
+
+/*
+ * Merge the running sample statistics of 'src' into 'dst'.
+ *
+ * 'nr' is the 1-based index of this merge in a sequence of sources:
+ * for the first source (nr == 1) the mean and S (sum of squared
+ * deviations) are copied verbatim; for later sources they are combined
+ * using the parallel-variance formula referenced below.  Sources with
+ * no samples are ignored entirely.
+ */
+static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
+{
+ double mean, S;
+
+ /* Nothing to merge; leave dst untouched. */
+ if (src->samples == 0)
+ return;
+
+ dst->min_val = min(dst->min_val, src->min_val);
+ dst->max_val = max(dst->max_val, src->max_val);
+
+ /*
+ * Compute new mean and S after the merge
+ * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ * #Parallel_algorithm>
+ */
+ if (nr == 1) {
+ mean = src->mean.u.f;
+ S = src->S.u.f;
+ } else {
+ double delta = src->mean.u.f - dst->mean.u.f;
+
+ /* Sample-count-weighted combined mean. */
+ mean = ((src->mean.u.f * src->samples) +
+ (dst->mean.u.f * dst->samples)) /
+ (dst->samples + src->samples);
+
+ /* Combined sum of squared deviations (Chan et al. update). */
+ S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
+ (dst->samples * src->samples) /
+ (dst->samples + src->samples);
+ }
+
+ dst->samples += src->samples;
+ dst->mean.u.f = mean;
+ dst->S.u.f = S;
+}
+
+/*
+ * Fold the per-group aggregates of 'src' into 'dst' for every data
+ * direction: keep the widest runtime/bandwidth extremes and sum the
+ * I/O totals.  init_group_run_stat() seeds the minimums with ~0UL, so
+ * the first real value always wins the min comparison; a zero minimum
+ * is skipped by the guard (presumably already minimal / treated as
+ * unset — NOTE(review): confirm intent of the zero check).
+ */
+void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
+{
+ int i;
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ if (dst->max_run[i] < src->max_run[i])
+ dst->max_run[i] = src->max_run[i];
+ if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
+ dst->min_run[i] = src->min_run[i];
+ if (dst->max_bw[i] < src->max_bw[i])
+ dst->max_bw[i] = src->max_bw[i];
+ if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
+ dst->min_bw[i] = src->min_bw[i];
+
+ dst->io_kb[i] += src->io_kb[i];
+ dst->agg[i] += src->agg[i];
+ }
+
+}
+
+/*
+ * Merge the statistics of one thread ('src') into an accumulated
+ * thread_stat ('dst'), used for group reporting.  'nr' is the 1-based
+ * count of threads merged so far and is forwarded to sum_stat() so the
+ * first merge copies rather than combines.
+ *
+ * When dst->unified_rw_rep is set, all data directions are collapsed
+ * into slot 0 instead of being kept per-direction.
+ *
+ * NOTE(review): in the unified path each direction of 'src' is merged
+ * into slot 0 with the same 'nr', so sum_stat() performs several
+ * merges at one index per source thread — confirm this matches the
+ * intended weighting of the combined mean/variance.
+ */
+void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src, int nr)
+{
+ int l, k;
+
+ /* Latency/bandwidth stats, byte counts and runtimes per direction. */
+ for (l = 0; l < DDIR_RWDIR_CNT; l++) {
+ if (!dst->unified_rw_rep) {
+ sum_stat(&dst->clat_stat[l], &src->clat_stat[l], nr);
+ sum_stat(&dst->slat_stat[l], &src->slat_stat[l], nr);
+ sum_stat(&dst->lat_stat[l], &src->lat_stat[l], nr);
+ sum_stat(&dst->bw_stat[l], &src->bw_stat[l], nr);
+
+ dst->io_bytes[l] += src->io_bytes[l];
+
+ /* Runtimes overlap, so take the longest, not the sum. */
+ if (dst->runtime[l] < src->runtime[l])
+ dst->runtime[l] = src->runtime[l];
+ } else {
+ sum_stat(&dst->clat_stat[0], &src->clat_stat[l], nr);
+ sum_stat(&dst->slat_stat[0], &src->slat_stat[l], nr);
+ sum_stat(&dst->lat_stat[0], &src->lat_stat[l], nr);
+ sum_stat(&dst->bw_stat[0], &src->bw_stat[l], nr);
+
+ dst->io_bytes[0] += src->io_bytes[l];
+
+ if (dst->runtime[0] < src->runtime[l])
+ dst->runtime[0] = src->runtime[l];
+ }
+ }
+
+ /* CPU usage and fault counters are plain sums. */
+ dst->usr_time += src->usr_time;
+ dst->sys_time += src->sys_time;
+ dst->ctx += src->ctx;
+ dst->majf += src->majf;
+ dst->minf += src->minf;
+
+ /* Depth-distribution and latency-bucket histograms: sum per bucket. */
+ for (k = 0; k < FIO_IO_U_MAP_NR; k++)
+ dst->io_u_map[k] += src->io_u_map[k];
+ for (k = 0; k < FIO_IO_U_MAP_NR; k++)
+ dst->io_u_submit[k] += src->io_u_submit[k];
+ for (k = 0; k < FIO_IO_U_MAP_NR; k++)
+ dst->io_u_complete[k] += src->io_u_complete[k];
+ for (k = 0; k < FIO_IO_U_LAT_U_NR; k++)
+ dst->io_u_lat_u[k] += src->io_u_lat_u[k];
+ for (k = 0; k < FIO_IO_U_LAT_M_NR; k++)
+ dst->io_u_lat_m[k] += src->io_u_lat_m[k];
+
+ /* Issued/short I/O unit counts, unified into slot 0 if requested. */
+ for (k = 0; k < DDIR_RWDIR_CNT; k++) {
+ if (!dst->unified_rw_rep) {
+ dst->total_io_u[k] += src->total_io_u[k];
+ dst->short_io_u[k] += src->short_io_u[k];
+ } else {
+ dst->total_io_u[0] += src->total_io_u[k];
+ dst->short_io_u[0] += src->short_io_u[k];
+ }
+ }
+
+ /* Completion-latency percentile buckets. */
+ for (k = 0; k < DDIR_RWDIR_CNT; k++) {
+ int m;
+
+ for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
+ if (!dst->unified_rw_rep)
+ dst->io_u_plat[k][m] += src->io_u_plat[k][m];
+ else
+ dst->io_u_plat[0][m] += src->io_u_plat[k][m];
+ }
+ }
+
+ dst->total_run_time += src->total_run_time;
+ dst->total_submit += src->total_submit;
+ dst->total_complete += src->total_complete;
+}
+
+/*
+ * Zero a group_run_stats and seed the per-direction minimums with ~0UL
+ * so the first value folded in by sum_group_stats() always wins the
+ * min comparison.
+ */
+void init_group_run_stat(struct group_run_stats *gs)
+{
+ int i;
+ memset(gs, 0, sizeof(*gs));
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++)
+ gs->min_bw[i] = gs->min_run[i] = ~0UL;
+}
+
+/*
+ * Zero a thread_stat and seed every io_stat minimum with the largest
+ * possible value (-1UL, i.e. ~0UL) so the first real sample replaces
+ * it.  groupid of -1 marks the stat as not yet assigned to a group.
+ */
+void init_thread_stat(struct thread_stat *ts)
+{
+ int j;
+
+ memset(ts, 0, sizeof(*ts));
+
+ for (j = 0; j < DDIR_RWDIR_CNT; j++) {
+ ts->lat_stat[j].min_val = -1UL;
+ ts->clat_stat[j].min_val = -1UL;
+ ts->slat_stat[j].min_val = -1UL;
+ ts->bw_stat[j].min_val = -1UL;
+ }
+ ts->groupid = -1;
+}
+
+static void __show_run_stats(void)
+{
+ struct group_run_stats *runstats, *rs;
+ struct thread_data *td;
+ struct thread_stat *threadstats, *ts;
+ int i, j, nr_ts, last_ts, idx;
+ int kb_base_warned = 0;
+ int unit_base_warned = 0;
+ struct json_object *root = NULL;
+ struct json_array *array = NULL;
+
+ runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));
+
+ for (i = 0; i < groupid + 1; i++)
+ init_group_run_stat(&runstats[i]);
+
+ /*
+ * find out how many threads stats we need. if group reporting isn't
+ * enabled, it's one-per-td.