/*
* Discard the error bits and apply the mask to find the
- * index for the buckets in the group
+ * index for the buckets in the group
*/
offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);
/* Make sure the index does not exceed (array size - 1) */
- idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1)?
+ idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1) ?
(base + offset) : (FIO_IO_U_PLAT_NR - 1);
return idx;
/* MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use
* all bits of the sample as index */
- if (idx < (FIO_IO_U_PLAT_VAL << 1) )
+ if (idx < (FIO_IO_U_PLAT_VAL << 1))
return idx;
/* Find the group and compute the minimum value of that group */
- error_bits = (idx >> FIO_IO_U_PLAT_BITS) -1;
+ error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1;
base = 1 << (error_bits + FIO_IO_U_PLAT_BITS);
/* Find its bucket number of the group */
return cmp;
}
-static unsigned int calc_clat_percentiles(unsigned int *io_u_plat,
- unsigned long nr, fio_fp64_t *plist,
- unsigned int **output,
- unsigned int *maxv,
- unsigned int *minv)
+unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long nr,
+ fio_fp64_t *plist, unsigned int **output,
+ unsigned int *maxv, unsigned int *minv)
{
unsigned long sum = 0;
unsigned int len, i, j = 0;
* isn't a worry. Also note that this does not work for NaN values.
*/
if (len > 1)
- qsort((void*)plist, len, sizeof(plist[0]), double_cmp);
+ qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
/*
* Calculate bucket values, note down max and min values
}
}
-#define ts_total_io_u(ts) \
- ((ts)->total_io_u[0] + (ts)->total_io_u[1])
-
-static void stat_calc_dist(unsigned int *map, unsigned long total,
- double *io_u_dist)
+void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist)
{
int i;
}
}
-static void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
+void stat_calc_lat_u(struct thread_stat *ts, double *io_u_lat)
{
stat_calc_lat(ts, io_u_lat, ts->io_u_lat_u, FIO_IO_U_LAT_U_NR);
}
-static void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
+void stat_calc_lat_m(struct thread_stat *ts, double *io_u_lat)
{
stat_calc_lat(ts, io_u_lat, ts->io_u_lat_m, FIO_IO_U_LAT_M_NR);
}
show_lat(io_u_lat_m, FIO_IO_U_LAT_M_NR, ranges, "msec");
}
-static void show_latencies(double *io_u_lat_u, double *io_u_lat_m)
+/*
+ * Compute the usec and msec latency bucket tables for @ts (via
+ * stat_calc_lat_u()/stat_calc_lat_m()) and print them. Replaces the
+ * old variant that had the caller pre-compute both arrays.
+ */
+static void show_latencies(struct thread_stat *ts)
 {
+	double io_u_lat_u[FIO_IO_U_LAT_U_NR];
+	double io_u_lat_m[FIO_IO_U_LAT_M_NR];
+
+	stat_calc_lat_u(ts, io_u_lat_u);
+	stat_calc_lat_m(ts, io_u_lat_m);
+
	show_lat_u(io_u_lat_u);
	show_lat_m(io_u_lat_m);
 }
double usr_cpu, sys_cpu;
unsigned long runtime;
double io_u_dist[FIO_IO_U_MAP_NR];
- double io_u_lat_u[FIO_IO_U_LAT_U_NR];
- double io_u_lat_m[FIO_IO_U_LAT_M_NR];
if (!(ts->io_bytes[0] + ts->io_bytes[1]) &&
!(ts->total_io_u[0] + ts->total_io_u[1]))
if (ts->io_bytes[DDIR_WRITE])
show_ddir_status(rs, ts, DDIR_WRITE);
- stat_calc_lat_u(ts, io_u_lat_u);
- stat_calc_lat_m(ts, io_u_lat_m);
- show_latencies(io_u_lat_u, io_u_lat_m);
+ show_latencies(ts);
runtime = ts->total_run_time;
if (runtime) {
if (ts->runtime[ddir]) {
uint64_t runt = ts->runtime[ddir];
- bw = ts->io_bytes[ddir] / runt;
+ bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024;
iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt;
}
}
static void show_thread_status_terse_v2(struct thread_stat *ts,
- struct group_run_stats *rs)
+ struct group_run_stats *rs)
{
double io_u_dist[FIO_IO_U_MAP_NR];
double io_u_lat_u[FIO_IO_U_LAT_U_NR];
/* Additional output if continue_on_error set - default off*/
if (ts->continue_on_error)
log_info(";%lu;%d", ts->total_err_count, ts->first_error);
- log_info("\n");
/* Additional output if description is set */
if (strlen(ts->description))
log_info(";%s", ts->description);
+
+ log_info("\n");
}
static void show_thread_status_terse(struct thread_stat *ts,
else
memset(ts->description, 0, FIO_JOBNAME_SIZE);
+ /*
+ * If multiple entries in this group, this is
+ * the first member.
+ */
+ ts->thread_number = td->thread_number;
ts->groupid = td->groupid;
/*
bw = 0;
if (ts->runtime[j]) {
- unsigned long runt;
+ unsigned long runt = ts->runtime[j];
+ unsigned long long kb;
- runt = ts->runtime[j];
- bw = ts->io_bytes[j] / runt;
+ kb = ts->io_bytes[j] / rs->kb_base;
+ bw = kb * 1000 / runt;
}
if (bw < rs->min_bw[j])
rs->min_bw[j] = bw;
}
for (i = 0; i < groupid + 1; i++) {
- unsigned long max_run[2];
-
rs = &runstats[i];
- max_run[0] = rs->max_run[0];
- max_run[1] = rs->max_run[1];
if (rs->max_run[0])
- rs->agg[0] = (rs->io_kb[0] * 1000) / max_run[0];
+ rs->agg[0] = (rs->io_kb[0] * 1000) / rs->max_run[0];
if (rs->max_run[1])
- rs->agg[1] = (rs->io_kb[1] * 1000) / max_run[1];
+ rs->agg[1] = (rs->io_kb[1] * 1000) / rs->max_run[1];
}
/*
else if (!terse_output)
show_disk_util(0);
- free_disk_util();
-
free(runstats);
free(threadstats);
}
+/*
+ * Worker thread body for the mid-run stats dump: temporarily fold the
+ * elapsed time of each in-flight job into its runtime counters, emit the
+ * stats, then subtract the same deltas back out so end-of-run accounting
+ * is not skewed.
+ */
+static void *__show_running_run_stats(void *arg)
+{
+	struct thread_data *td;
+	unsigned long long *rt;
+	struct timeval tv;
+	int i;
+
+	/* NOTE(review): malloc return is not checked — TODO confirm policy */
+	rt = malloc(thread_number * sizeof(unsigned long long));
+	fio_gettime(&tv, NULL);
+
+	for_each_td(td, i) {
+		/* elapsed time since this job started, per mtime_since() */
+		rt[i] = mtime_since(&td->start, &tv);
+		if (td_read(td) && td->io_bytes[DDIR_READ])
+			td->ts.runtime[DDIR_READ] += rt[i];
+		if (td_write(td) && td->io_bytes[DDIR_WRITE])
+			td->ts.runtime[DDIR_WRITE] += rt[i];
+
+		update_rusage_stat(td);
+		td->ts.io_bytes[0] = td->io_bytes[0];
+		td->ts.io_bytes[1] = td->io_bytes[1];
+		td->ts.total_run_time = mtime_since(&td->epoch, &tv);
+	}
+
+	show_run_stats();
+
+	/*
+	 * Undo the runtime adjustment made above.
+	 * NOTE(review): the td_read/td_write + io_bytes conditions are
+	 * re-evaluated here; if io_bytes went from 0 to non-zero while
+	 * show_run_stats() ran, a delta could be subtracted that was
+	 * never added — presumably benign, but verify.
+	 */
+	for_each_td(td, i) {
+		if (td_read(td) && td->io_bytes[DDIR_READ])
+			td->ts.runtime[DDIR_READ] -= rt[i];
+		if (td_write(td) && td->io_bytes[DDIR_WRITE])
+			td->ts.runtime[DDIR_WRITE] -= rt[i];
+	}
+
+	free(rt);
+	return NULL;
+}
+
+/*
+ * Called from a signal handler. It _should_ be safe to just run this
+ * inline in the sig handler, but we disturb the system less by handing
+ * the work off to a short-lived thread.
+ */
+void show_running_run_stats(void)
+{
+	pthread_t thread;
+
+	/* NOTE(review): pthread_create() failure is silently ignored —
+	 * best-effort by design? confirm. Thread is detached, so no join. */
+	pthread_create(&thread, NULL, __show_running_run_stats, NULL);
+	pthread_detach(thread);
+}
+
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
double val = data;