+ if (ts->latency_depth) {
+ json_object_add_value_int(root, "latency_depth", ts->latency_depth);
+ json_object_add_value_int(root, "latency_target", ts->latency_target);
+ json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
+ json_object_add_value_int(root, "latency_window", ts->latency_window);
+ }
+
+ /* Additional output if description is set */
+ if (strlen(ts->description))
+ json_object_add_value_string(root, "desc", ts->description);
+
+ if (ts->nr_block_infos) {
+ /* Block error histogram and types */
+ int len;
+ unsigned int *percentiles = NULL;
+ unsigned int block_state_counts[BLOCK_STATE_COUNT];
+
+ len = calc_block_percentiles(ts->nr_block_infos, ts->block_infos,
+ ts->percentile_list,
+ &percentiles, block_state_counts);
+
+ if (len) {
+ struct json_object *block, *percentile_object, *states;
+ int state;
+ block = json_create_object();
+ json_object_add_value_object(root, "block", block);
+
+ percentile_object = json_create_object();
+ json_object_add_value_object(block, "percentiles",
+ percentile_object);
+ for (i = 0; i < len; i++) {
+ char buf[20];
+ snprintf(buf, sizeof(buf), "%f",
+ ts->percentile_list[i].u.f);
+ json_object_add_value_int(percentile_object,
+ (const char *)buf,
+ percentiles[i]);
+ }
+
+ states = json_create_object();
+ json_object_add_value_object(block, "states", states);
+ for (state = 0; state < BLOCK_STATE_COUNT; state++) {
+ json_object_add_value_int(states,
+ block_state_names[state],
+ block_state_counts[state]);
+ }
+ free(percentiles);
+ }
+ }
+
+ return root;
+}
+
+/*
+ * Emit 'ts' in terse format, dispatching on the global terse_version:
+ * v2 and v3/v4 have dedicated emitters. Any other version produces no
+ * output and is reported via log_err().
+ */
+static void show_thread_status_terse(struct thread_stat *ts,
+ struct group_run_stats *rs,
+ struct buf_output *out)
+{
+ if (terse_version == 2)
+ show_thread_status_terse_v2(ts, rs, out);
+ else if (terse_version == 3 || terse_version == 4)
+ show_thread_status_terse_v3_v4(ts, rs, terse_version, out);
+ else
+ log_err("fio: bad terse version!? %d\n", terse_version);
+}
+
+/*
+ * Emit the stats for one thread in every output format enabled in the
+ * global output_format bitmask (terse, JSON, normal) — the formats are
+ * not mutually exclusive, so several may be written in one call.
+ * Returns the JSON representation when FIO_OUTPUT_JSON is set,
+ * otherwise NULL.
+ */
+struct json_object *show_thread_status(struct thread_stat *ts,
+ struct group_run_stats *rs,
+ struct flist_head *opt_list,
+ struct buf_output *out)
+{
+ struct json_object *ret = NULL;
+
+ if (output_format & FIO_OUTPUT_TERSE)
+ show_thread_status_terse(ts, rs, out);
+ if (output_format & FIO_OUTPUT_JSON)
+ ret = show_thread_status_json(ts, rs, opt_list);
+ if (output_format & FIO_OUTPUT_NORMAL)
+ show_thread_status_normal(ts, rs, out);
+
+ return ret;
+}
+
+/*
+ * Merge the running statistics of 'src' into 'dst': min/max are
+ * combined directly, and mean/S are merged with the pairwise update
+ * below. 'first' means dst has not yet absorbed any source, so src's
+ * mean and S are taken verbatim. An empty src is ignored entirely so
+ * it cannot skew the merged mean.
+ */
+static void sum_stat(struct io_stat *dst, struct io_stat *src, bool first)
+{
+ double mean, S;
+
+ if (src->samples == 0)
+ return;
+
+ dst->min_val = min(dst->min_val, src->min_val);
+ dst->max_val = max(dst->max_val, src->max_val);
+
+ /*
+ * Compute new mean and S after the merge
+ * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ * #Parallel_algorithm>
+ * S here is the running sum of squared deviations from the mean
+ * (M2 in the cited algorithm); the delta^2 term corrects for the
+ * two inputs having different means.
+ */
+ if (first) {
+ mean = src->mean.u.f;
+ S = src->S.u.f;
+ } else {
+ double delta = src->mean.u.f - dst->mean.u.f;
+
+ mean = ((src->mean.u.f * src->samples) +
+ (dst->mean.u.f * dst->samples)) /
+ (dst->samples + src->samples);
+
+ S = src->S.u.f + dst->S.u.f + pow(delta, 2.0) *
+ (dst->samples * src->samples) /
+ (dst->samples + src->samples);
+ }
+
+ /* Update sample count last: the merge above needs the old counts */
+ dst->samples += src->samples;
+ dst->mean.u.f = mean;
+ dst->S.u.f = S;
+}
+
+/*
+ * Accumulate group-level aggregates from 'src' into 'dst' for every
+ * data direction: widen the run-time and bandwidth extremes, and sum
+ * the I/O and aggregate-bandwidth totals. Minima are only lowered when
+ * dst already holds a non-zero value — a zero dst minimum is left
+ * untouched by design here; NOTE(review): this reads as "0 means no
+ * data", confirm against how min_run/min_bw are initialized.
+ */
+void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src)
+{
+ int i;
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ if (dst->max_run[i] < src->max_run[i])
+ dst->max_run[i] = src->max_run[i];
+ if (dst->min_run[i] && dst->min_run[i] > src->min_run[i])
+ dst->min_run[i] = src->min_run[i];
+ if (dst->max_bw[i] < src->max_bw[i])
+ dst->max_bw[i] = src->max_bw[i];
+ if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i])
+ dst->min_bw[i] = src->min_bw[i];
+
+ dst->io_kb[i] += src->io_kb[i];
+ dst->agg[i] += src->agg[i];
+ }
+
+ /* Adopt src's size bases only if dst has none set yet */
+ if (!dst->kb_base)
+ dst->kb_base = src->kb_base;
+ if (!dst->unit_base)
+ dst->unit_base = src->unit_base;
+}
+
+void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src,
+ bool first)
+{
+ int l, k;
+
+ for (l = 0; l < DDIR_RWDIR_CNT; l++) {
+ if (!dst->unified_rw_rep) {
+ sum_stat(&dst->clat_stat[l], &src->clat_stat[l], first);
+ sum_stat(&dst->slat_stat[l], &src->slat_stat[l], first);
+ sum_stat(&dst->lat_stat[l], &src->lat_stat[l], first);
+ sum_stat(&dst->bw_stat[l], &src->bw_stat[l], first);
+
+ dst->io_bytes[l] += src->io_bytes[l];
+
+ if (dst->runtime[l] < src->runtime[l])
+ dst->runtime[l] = src->runtime[l];
+ } else {
+ sum_stat(&dst->clat_stat[0], &src->clat_stat[l], first);
+ sum_stat(&dst->slat_stat[0], &src->slat_stat[l], first);
+ sum_stat(&dst->lat_stat[0], &src->lat_stat[l], first);
+ sum_stat(&dst->bw_stat[0], &src->bw_stat[l], first);
+
+ dst->io_bytes[0] += src->io_bytes[l];
+
+ if (dst->runtime[0] < src->runtime[l])
+ dst->runtime[0] = src->runtime[l];
+
+ /*
+ * We're summing to the same destination, so override
+ * 'first' after the first iteration of the loop
+ */
+ first = false;
+ }
+ }
+
+ dst->usr_time += src->usr_time;