show_lat_m(io_u_lat_m);
}
-void show_thread_status(struct thread_stat *ts, struct group_run_stats *rs)
+void show_thread_status_normal(struct thread_stat *ts, struct group_run_stats *rs)
{
double usr_cpu, sys_cpu;
unsigned long runtime;
ts->first_error,
strerror(ts->first_error));
}
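+ /* show the latency_target settings and achieved queue depth, if used */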
+ if (ts->latency_depth) {
+ log_info(" latency : target=%llu, window=%llu, percentile=%.2f%%, depth=%u\n",
+ (unsigned long long)ts->latency_target,
+ (unsigned long long)ts->latency_window,
+ ts->latency_percentile.u.f,
+ ts->latency_depth);
+ }
}
static void show_ddir_status_terse(struct thread_stat *ts,
log_info(";%3.2f%%", io_u_lat_m[i]);
/* disk util stats, if any */
- show_disk_util(1, NULL);
+ if (is_backend)
+ show_disk_util(1, NULL);
/* Additional output if continue_on_error set - default off*/
if (ts->continue_on_error)
/* Additional output if continue_on_error set - default off*/
if (ts->continue_on_error) {
json_object_add_value_int(root, "total_err", ts->total_err_count);
- json_object_add_value_int(root, "total_err", ts->first_error);
+ json_object_add_value_int(root, "first_error", ts->first_error);
+ }
+
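+ /* mirror the latency_target information in the JSON output */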
+ if (ts->latency_depth) {
+ json_object_add_value_int(root, "latency_depth", ts->latency_depth);
+ json_object_add_value_int(root, "latency_target", ts->latency_target);
+ json_object_add_value_float(root, "latency_percentile", ts->latency_percentile.u.f);
+ json_object_add_value_int(root, "latency_window", ts->latency_window);
}
/* Additional output if description is set */
log_err("fio: bad terse version!? %d\n", terse_version);
}
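+/*
+ * Dispatch to the right output routine for the selected output_format.
+ * Only the JSON path returns an object; terse and normal output are
+ * written directly and NULL is returned.
+ */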
+struct json_object *show_thread_status(struct thread_stat *ts,
+ struct group_run_stats *rs)
+{
+ if (output_format == FIO_OUTPUT_TERSE)
+ show_thread_status_terse(ts, rs);
+ else if (output_format == FIO_OUTPUT_JSON)
+ return show_thread_status_json(ts, rs);
+ else
+ show_thread_status_normal(ts, rs);
+ return NULL;
+}
+
static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
{
double mean, S;
}
}
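+ /* copy the latency_target settings and resulting queue depth for reporting */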
+ ts->latency_depth = td->latency_qd;
+ ts->latency_target = td->o.latency_target;
+ ts->latency_percentile = td->o.latency_percentile;
+ ts->latency_window = td->o.latency_window;
+
sum_thread_stats(ts, &td->ts, idx);
}
struct json_object *tmp = show_thread_status_json(ts, rs);
json_array_add_value_object(array, tmp);
} else
- show_thread_status(ts, rs);
+ show_thread_status_normal(ts, rs);
}
if (output_format == FIO_OUTPUT_JSON) {
/* disk util stats, if any */
show_idle_prof_stats(FIO_OUTPUT_NORMAL, NULL);
}
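+ /* optionally append terse-format output after the regular output */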
+ if (output_format != FIO_OUTPUT_TERSE && append_terse_output) {
+ log_info("\nAdditional Terse Output:\n");
+
+ for (i = 0; i < nr_ts; i++) {
+ ts = &threadstats[i];
+ rs = &runstats[ts->groupid];
+ show_thread_status_terse(ts, rs);
+ }
+ }
+
log_info_flush();
free(runstats);
free(threadstats);
ios->mean.u.f = ios->S.u.f = 0;
}
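+/*
+ * Reset all accumulated per-thread I/O statistics: the running
+ * latency/bandwidth/IOPS stats, byte and runtime counters, and the
+ * depth/latency histogram buckets.
+ */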
+void reset_io_stats(struct thread_data *td)
+{
+ struct thread_stat *ts = &td->ts;
+ int i, j;
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ reset_io_stat(&ts->clat_stat[i]);
+ reset_io_stat(&ts->slat_stat[i]);
+ reset_io_stat(&ts->lat_stat[i]);
+ reset_io_stat(&ts->bw_stat[i]);
+ reset_io_stat(&ts->iops_stat[i]);
+
+ ts->io_bytes[i] = 0;
+ ts->runtime[i] = 0;
+
+ for (j = 0; j < FIO_IO_U_PLAT_NR; j++)
+ ts->io_u_plat[i][j] = 0;
+ }
+
+ for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
+ ts->io_u_map[i] = 0;
+ ts->io_u_submit[i] = 0;
+ ts->io_u_complete[i] = 0;
+ }
+
+ /* the latency bucket arrays are larger than FIO_IO_U_MAP_NR */
+ for (i = 0; i < FIO_IO_U_LAT_U_NR; i++)
+ ts->io_u_lat_u[i] = 0;
+ for (i = 0; i < FIO_IO_U_LAT_M_NR; i++)
+ ts->io_u_lat_m[i] = 0;
+
+ ts->total_submit = 0;
+ ts->total_complete = 0;
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ ts->total_io_u[i] = 0;
+ ts->short_io_u[i] = 0;
+ }
+}
+
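+/*
+ * Flush the per-direction averaging windows out as log samples and
+ * reset the windows afterwards.
+ */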
+static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed)
+{
+ /*
+ * Note an entry in the log. Use the mean from the logged samples,
+ * making sure to properly round up. Only write a log entry if we
+ * had actual samples done.
+ */
+ if (iolog->avg_window[DDIR_READ].samples) {
+ unsigned long mr;
+
+ mr = iolog->avg_window[DDIR_READ].mean.u.f + 0.50;
+ __add_log_sample(iolog, mr, DDIR_READ, 0, elapsed);
+ }
+ if (iolog->avg_window[DDIR_WRITE].samples) {
+ unsigned long mw;
+
+ mw = iolog->avg_window[DDIR_WRITE].mean.u.f + 0.50;
+ __add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed);
+ }
+ if (iolog->avg_window[DDIR_TRIM].samples) {
+ unsigned long mt;
+
+ mt = iolog->avg_window[DDIR_TRIM].mean.u.f + 0.50;
+ __add_log_sample(iolog, mt, DDIR_TRIM, 0, elapsed);
+ }
+
+ reset_io_stat(&iolog->avg_window[DDIR_READ]);
+ reset_io_stat(&iolog->avg_window[DDIR_WRITE]);
+ reset_io_stat(&iolog->avg_window[DDIR_TRIM]);
+}
+
static void add_log_sample(struct thread_data *td, struct io_log *iolog,
unsigned long val, enum fio_ddir ddir,
unsigned int bs)
if (this_window < iolog->avg_msec)
return;
- /*
- * Note an entry in the log. Use the mean from the logged samples,
- * making sure to properly round up. Only write a log entry if we
- * had actual samples done.
- */
- if (iolog->avg_window[DDIR_READ].samples) {
- unsigned long mr;
+ _add_stat_to_log(iolog, elapsed);
- mr = iolog->avg_window[DDIR_READ].mean.u.f + 0.50;
- __add_log_sample(iolog, mr, DDIR_READ, 0, elapsed);
- }
- if (iolog->avg_window[DDIR_WRITE].samples) {
- unsigned long mw;
-
- mw = iolog->avg_window[DDIR_WRITE].mean.u.f + 0.50;
- __add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed);
- }
- if (iolog->avg_window[DDIR_TRIM].samples) {
- unsigned long mw;
+ iolog->avg_last = elapsed;
+}
- mw = iolog->avg_window[DDIR_TRIM].mean.u.f + 0.50;
- __add_log_sample(iolog, mw, DDIR_TRIM, 0, elapsed);
- }
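+/*
+ * Flush whatever is still buffered in the averaging windows of each
+ * enabled log (clat/slat/lat/bw/iops) as a final sample.
+ */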
+void finalize_logs(struct thread_data *td)
+{
+ unsigned long elapsed;
+
+ elapsed = mtime_since_now(&td->epoch);
- reset_io_stat(&iolog->avg_window[DDIR_READ]);
- reset_io_stat(&iolog->avg_window[DDIR_WRITE]);
- reset_io_stat(&iolog->avg_window[DDIR_TRIM]);
- iolog->avg_last = elapsed;
+ if (td->clat_log)
+ _add_stat_to_log(td->clat_log, elapsed);
+ if (td->slat_log)
+ _add_stat_to_log(td->slat_log, elapsed);
+ if (td->lat_log)
+ _add_stat_to_log(td->lat_log, elapsed);
+ if (td->bw_log)
+ _add_stat_to_log(td->bw_log, elapsed);
+ if (td->iops_log)
+ _add_stat_to_log(td->iops_log, elapsed);
}
void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs)