#include <stdio.h>
#include <string.h>
#include <sys/time.h>
-#include <sys/types.h>
#include <sys/stat.h>
-#include <dirent.h>
-#include <libgen.h>
#include <math.h>
#include "fio.h"
#include "helper_thread.h"
#include "smalloc.h"
-#define LOG_MSEC_SLACK 10
+#define LOG_MSEC_SLACK 1
-struct fio_mutex *stat_mutex;
+struct fio_sem *stat_sem;
void clear_rusage_stat(struct thread_data *td)
{
return cmp;
}
-unsigned int calc_clat_percentiles(unsigned int *io_u_plat, unsigned long long nr,
+unsigned int calc_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr,
fio_fp64_t *plist, unsigned long long **output,
unsigned long long *maxv, unsigned long long *minv)
{
/*
* Find and display the p-th percentile of clat
*/
-static void show_clat_percentiles(unsigned int *io_u_plat, unsigned long long nr,
+static void show_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr,
fio_fp64_t *plist, unsigned int precision,
const char *pre, struct buf_output *out)
{
}
}
-void stat_calc_dist(unsigned int *map, unsigned long total, double *io_u_dist)
+void stat_calc_dist(uint64_t *map, unsigned long total, double *io_u_dist)
{
int i;
}
static void stat_calc_lat(struct thread_stat *ts, double *dst,
- unsigned int *src, int nr)
+ uint64_t *src, int nr)
{
unsigned long total = ddir_rw_sum(ts->total_io_u);
int i;
* To keep the terse format unaltered, add all of the ns latency
* buckets to the first us latency bucket
*/
-void stat_calc_lat_nu(struct thread_stat *ts, double *io_u_lat_u)
+static void stat_calc_lat_nu(struct thread_stat *ts, double *io_u_lat_u)
{
unsigned long ntotal = 0, total = ddir_rw_sum(ts->total_io_u);
int i;
if (len > 1)
qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
- nr_uninit = 0;
/* Start only after the uninit entries end */
for (nr_uninit = 0;
nr_uninit < nr_block_infos
usr_cpu = 0;
sys_cpu = 0;
}
+ json_object_add_value_int(root, "job_runtime", ts->total_run_time);
json_object_add_value_float(root, "usr_cpu", usr_cpu);
json_object_add_value_float(root, "sys_cpu", sys_cpu);
json_object_add_value_int(root, "ctx", ts->ctx);
json_object_add_value_int(root, "majf", ts->majf);
json_object_add_value_int(root, "minf", ts->minf);
-
- /* Calc % distribution of IO depths, usecond, msecond latency */
+ /* Calc % distribution of IO depths */
stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
- stat_calc_lat_n(ts, io_u_lat_n);
- stat_calc_lat_u(ts, io_u_lat_u);
- stat_calc_lat_m(ts, io_u_lat_m);
-
tmp = json_create_object();
json_object_add_value_object(root, "iodepth_level", tmp);
/* Only show fixed 7 I/O depth levels*/
json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
}
+ /* Calc % distribution of submit IO depths */
+ stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
+ tmp = json_create_object();
+ json_object_add_value_object(root, "iodepth_submit", tmp);
+ /* Only show fixed 7 I/O depth levels*/
+ for (i = 0; i < 7; i++) {
+ char name[20];
+ if (i == 0)
+ snprintf(name, 20, "0");
+ else if (i < 6)
+ snprintf(name, 20, "%d", 1 << (i+1));
+ else
+ snprintf(name, 20, ">=%d", 1 << i);
+ json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
+ }
+
+ /* Calc % distribution of completion IO depths */
+ stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
+ tmp = json_create_object();
+ json_object_add_value_object(root, "iodepth_complete", tmp);
+ /* Only show fixed 7 I/O depth levels*/
+ for (i = 0; i < 7; i++) {
+ char name[20];
+ if (i == 0)
+ snprintf(name, 20, "0");
+ else if (i < 6)
+ snprintf(name, 20, "%d", 1 << (i+1));
+ else
+ snprintf(name, 20, ">=%d", 1 << i);
+ json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
+ }
+
+ /* Calc % distribution of nsecond, usecond, msecond latency */
+ stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
+ stat_calc_lat_n(ts, io_u_lat_n);
+ stat_calc_lat_u(ts, io_u_lat_u);
+ stat_calc_lat_m(ts, io_u_lat_m);
+
/* Nanosecond latency */
tmp = json_create_object();
json_object_add_value_object(root, "latency_ns", tmp);
if (ts->ss_dur) {
struct json_object *data;
struct json_array *iops, *bw;
- int i, j, k;
+ int j, k, l;
char ss_buf[64];
snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
j = ts->ss_head;
else
j = ts->ss_head == 0 ? ts->ss_dur - 1 : ts->ss_head - 1;
- for (i = 0; i < ts->ss_dur; i++) {
- k = (j + i) % ts->ss_dur;
+ for (l = 0; l < ts->ss_dur; l++) {
+ k = (j + l) % ts->ss_dur;
json_array_add_value_int(bw, ts->ss_bw_data[k]);
json_array_add_value_int(iops, ts->ss_iops_data[k]);
}
char time_buf[32];
struct timeval now;
unsigned long long ms_since_epoch;
+ time_t tv_sec;
gettimeofday(&now, NULL);
ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
(unsigned long long)(now.tv_usec) / 1000;
- os_ctime_r((const time_t *) &now.tv_sec, time_buf,
- sizeof(time_buf));
+ tv_sec = now.tv_sec;
+ os_ctime_r(&tv_sec, time_buf, sizeof(time_buf));
if (time_buf[strlen(time_buf) - 1] == '\n')
time_buf[strlen(time_buf) - 1] = '\0';
buf_output_free(out);
}
+ fio_idle_prof_cleanup();
+
log_info_flush();
free(runstats);
free(threadstats);
free(opt_lists);
}
-void show_run_stats(void)
-{
- fio_mutex_down(stat_mutex);
- __show_run_stats();
- fio_mutex_up(stat_mutex);
-}
-
void __show_running_run_stats(void)
{
struct thread_data *td;
struct timespec ts;
int i;
- fio_mutex_down(stat_mutex);
+ fio_sem_down(stat_sem);
rt = malloc(thread_number * sizeof(unsigned long long));
fio_gettime(&ts, NULL);
continue;
if (td->rusage_sem) {
td->update_rusage = 1;
- fio_mutex_down(td->rusage_sem);
+ fio_sem_down(td->rusage_sem);
}
td->update_rusage = 0;
}
}
free(rt);
- fio_mutex_up(stat_mutex);
+ fio_sem_up(stat_sem);
}
static bool status_interval_init;
* submissions, flag 'td' as needing a log regrow and we'll take
* care of it on the submission side.
*/
- if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD ||
+ if ((iolog->td && iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD) ||
!per_unit_log(iolog))
return regrow_log(iolog);
- iolog->td->flags |= TD_F_REGROW_LOGS;
- assert(iolog->pending->nr_samples < iolog->pending->max_samples);
+ if (iolog->td)
+ iolog->td->flags |= TD_F_REGROW_LOGS;
+ if (iolog->pending)
+ assert(iolog->pending->nr_samples < iolog->pending->max_samples);
return iolog->pending;
}
__add_stat_to_log(iolog, ddir, elapsed, log_max);
}
-static long add_log_sample(struct thread_data *td, struct io_log *iolog,
- union io_sample_data data, enum fio_ddir ddir,
- unsigned int bs, uint64_t offset)
+static unsigned long add_log_sample(struct thread_data *td,
+ struct io_log *iolog,
+ union io_sample_data data,
+ enum fio_ddir ddir, unsigned int bs,
+ uint64_t offset)
{
unsigned long elapsed, this_window;
if (elapsed < iolog->avg_last[ddir])
return iolog->avg_last[ddir] - elapsed;
else if (this_window < iolog->avg_msec) {
- int diff = iolog->avg_msec - this_window;
+ unsigned long diff = iolog->avg_msec - this_window;
if (inline_log(iolog) || diff > LOG_MSEC_SLACK)
return diff;
this_window = elapsed - hw->hist_last;
if (this_window >= iolog->hist_msec) {
- unsigned int *io_u_plat;
+ uint64_t *io_u_plat;
struct io_u_plat_entry *dst;
/*
* located in iolog.c after printing this sample to the
* log file.
*/
- io_u_plat = (unsigned int *) td->ts.io_u_plat[ddir];
+ io_u_plat = (uint64_t *) td->ts.io_u_plat[ddir];
dst = malloc(sizeof(struct io_u_plat_entry));
-	memcpy(&(dst->io_u_plat), io_u_plat,
-		FIO_IO_U_PLAT_NR * sizeof(unsigned int));
+	memcpy(&(dst->io_u_plat), io_u_plat,
+		FIO_IO_U_PLAT_NR * sizeof(uint64_t));
{
unsigned long spent, rate;
enum fio_ddir ddir;
- unsigned int next, next_log;
+ unsigned long next, next_log;
next_log = avg_time;
void stat_init(void)
{
- stat_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
+ stat_sem = fio_sem_init(FIO_SEM_UNLOCKED);
}
void stat_exit(void)
- * When we have the mutex, we know out-of-band access to it
- * have ended.
+ * When we have the semaphore, we know out-of-band access to it
+ * has ended.
*/
- fio_mutex_down(stat_mutex);
- fio_mutex_remove(stat_mutex);
+ fio_sem_down(stat_sem);
+ fio_sem_remove(stat_sem);
}
/*