/*
* Cheesy number->string conversion, complete with carry rounding error.
*/
-static char *num2str(unsigned long num, int maxlen, int base)
+static char *num2str(unsigned long num, int maxlen, int base, int pow2)
{
- /*
- * could be passed in for 10^3 base, but every caller expects
- * 2^10 base right now.
- */
- const unsigned int thousand = 1024;
- char postfix[] = { 'K', 'M', 'G', 'P', 'E' };
+ char postfix[] = { ' ', 'K', 'M', 'G', 'P', 'E' };
+ unsigned int thousand;
char *buf;
int i;
+ if (pow2)
+ thousand = 1024;
+ else
+ thousand = 1000;
+
buf = malloc(128);
for (i = 0; base > 1; i++)
len = sprintf(buf, "%'lu", num);
if (len <= maxlen) {
- buf[len] = postfix[i];
- buf[len + 1] = '\0';
+ if (i >= 1) {
+ buf[len] = postfix[i];
+ buf[len + 1] = '\0';
+ }
return buf;
}
return found;
}
-void init_disk_util(struct thread_data *td)
+static void __init_disk_util(struct thread_data *td, struct fio_file *f)
{
- struct fio_file *f;
struct stat st;
char foo[PATH_MAX], tmp[PATH_MAX];
dev_t dev;
char *p;
- if (!td->do_disk_util || (td->io_ops->flags & FIO_DISKLESSIO))
- return;
-
- /*
- * Just use the same file, they are on the same device.
- */
- f = &td->files[0];
if (!stat(f->file_name, &st)) {
if (S_ISBLK(st.st_mode))
dev = st.st_rdev;
sprintf(foo, "%s", tmp);
}
- if (td->ioscheduler)
+ if (td->ioscheduler && !td->sysfs_root)
td->sysfs_root = strdup(foo);
disk_util_add(dev, foo);
}
+/*
+ * Set up disk utilization tracking for a job. Bails out early when the
+ * job disabled disk_util reporting, or when the io engine does no real
+ * disk io (FIO_DISKLESSIO) or explicitly opts out (FIO_NODISKUTIL).
+ * Otherwise registers every file of the job via __init_disk_util().
+ */
+void init_disk_util(struct thread_data *td)
+{
+	struct fio_file *f;
+	unsigned int i;
+
+	if (!td->do_disk_util ||
+	    (td->io_ops->flags & (FIO_DISKLESSIO | FIO_NODISKUTIL)))
+		return;
+
+	for_each_file(td, f, i)
+		__init_disk_util(td, f);
+}
+
void disk_util_timer_arm(void)
{
itimer.it_value.tv_sec = 0;
void update_rusage_stat(struct thread_data *td)
{
- struct thread_stat *ts = td->ts;
+ struct thread_stat *ts = &td->ts;
getrusage(RUSAGE_SELF, &ts->ru_end);
if (!rs->max_run[i])
continue;
- p1 = num2str(rs->io_kb[i], 6, 1);
- p2 = num2str(rs->agg[i], 6, 1);
- p3 = num2str(rs->min_bw[i], 6, 1);
- p4 = num2str(rs->max_bw[i], 6, 1);
+ p1 = num2str(rs->io_kb[i], 6, 1000, 1);
+ p2 = num2str(rs->agg[i], 6, 1000, 1);
+ p3 = num2str(rs->min_bw[i], 6, 1000, 1);
+ p4 = num2str(rs->max_bw[i], 6, 1000, 1);
- fprintf(f_out, "%s: io=%siB, aggrb=%siB/s, minb=%siB/s, maxb=%siB/s, mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2, p3, p4, rs->min_run[0], rs->max_run[0]);
+ fprintf(f_out, "%s: io=%siB, aggrb=%siB/s, minb=%siB/s, maxb=%siB/s, mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2, p3, p4, rs->min_run[i], rs->max_run[i]);
free(p1);
free(p2);
}
}
-static void show_ddir_status(struct thread_data *td, struct group_run_stats *rs,
+/*
+ * Total number of io units accounted in a thread_stat: read ([0]) plus
+ * write ([1]) totals.
+ */
+#define ts_total_io_u(ts) \
+	((ts)->total_io_u[0] + (ts)->total_io_u[1])
+
+/*
+ * Convert the raw queue-depth map into percentages of all io units.
+ * io_u_dist[] must hold FIO_IO_U_MAP_NR entries.
+ * NOTE(review): divides by ts_total_io_u(ts) unconditionally — presumably
+ * callers only reach this with at least one io unit accounted; verify.
+ */
+static void stat_calc_dist(struct thread_stat *ts, double *io_u_dist)
+{
+	int i;
+
+	/*
+	 * Do depth distribution calculations
+	 */
+	for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
+		io_u_dist[i] = (double) ts->io_u_map[i] / (double) ts_total_io_u(ts);
+		io_u_dist[i] *= 100.0;
+	}
+}
+
+/*
+ * Convert the raw latency buckets into percentages of all io units.
+ * io_u_lat[] must hold FIO_IO_U_LAT_NR entries. Same divide-by-total
+ * caveat as stat_calc_dist() above applies here.
+ */
+static void stat_calc_lat(struct thread_stat *ts, double *io_u_lat)
+{
+	int i;
+
+	/*
+	 * Do latency distribution calculations
+	 */
+	for (i = 0; i < FIO_IO_U_LAT_NR; i++) {
+		io_u_lat[i] = (double) ts->io_u_lat[i] / (double) ts_total_io_u(ts);
+		io_u_lat[i] *= 100.0;
+	}
+}
+
+static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
int ddir)
{
const char *ddir_str[] = { "read ", "write" };
- struct thread_stat *ts;
unsigned long min, max;
- unsigned long long bw;
+ unsigned long long bw, iops;
double mean, dev;
- char *io_p, *bw_p;
+ char *io_p, *bw_p, *iops_p;
- if (!td->runtime[ddir])
+ if (!ts->runtime[ddir])
return;
- bw = td->io_bytes[ddir] / td->runtime[ddir];
- io_p = num2str(td->io_bytes[ddir] >> 10, 6, 1);
- bw_p = num2str(bw, 6, 1);
+ bw = ts->io_bytes[ddir] / ts->runtime[ddir];
+ iops = (1000 * ts->total_io_u[ddir]) / ts->runtime[ddir];
+ io_p = num2str(ts->io_bytes[ddir] >> 10, 6, 1000, 1);
+ bw_p = num2str(bw, 6, 1000, 1);
+ iops_p = num2str(iops, 6, 1, 0);
- fprintf(f_out, " %s: io=%siB, bw=%siB/s, runt=%6lumsec\n", ddir_str[ddir], io_p, bw_p, td->runtime[ddir]);
+ fprintf(f_out, " %s: io=%siB, bw=%siB/s, iops=%s, runt=%6lumsec\n", ddir_str[ddir], io_p, bw_p, iops_p, ts->runtime[ddir]);
free(io_p);
free(bw_p);
+ free(iops_p);
- ts = td->ts;
if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, " slat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);
}
}
-static void show_thread_status(struct thread_data *td,
+static void show_thread_status(struct thread_stat *ts,
struct group_run_stats *rs)
{
double usr_cpu, sys_cpu;
unsigned long runtime;
double io_u_dist[FIO_IO_U_MAP_NR];
double io_u_lat[FIO_IO_U_LAT_NR];
- int i;
- if (!(td->io_bytes[0] + td->io_bytes[1]))
+ if (!(ts->io_bytes[0] + ts->io_bytes[1]))
return;
- if (!td->error)
- fprintf(f_out, "%s: (groupid=%d): err=%2d: pid=%d\n",td->name, td->groupid, td->error, td->pid);
+ if (!ts->error)
+ fprintf(f_out, "%s: (groupid=%d, jobs=%d): err=%2d: pid=%d\n", ts->name, ts->groupid, ts->members, ts->error, ts->pid);
else
- fprintf(f_out, "%s: (groupid=%d): err=%2d (%s): pid=%d\n",td->name, td->groupid, td->error, td->verror, td->pid);
+ fprintf(f_out, "%s: (groupid=%d, jobs=%d): err=%2d (%s): pid=%d\n", ts->name, ts->groupid, ts->members, ts->error, ts->verror, ts->pid);
+
+ if (ts->description)
+ fprintf(f_out, " Description : [%s]\n", ts->description);
- if (td_read(td))
- show_ddir_status(td, rs, DDIR_READ);
- if (td_write(td))
- show_ddir_status(td, rs, DDIR_WRITE);
+ if (ts->io_bytes[DDIR_READ])
+ show_ddir_status(rs, ts, DDIR_READ);
+ if (ts->io_bytes[DDIR_WRITE])
+ show_ddir_status(rs, ts, DDIR_WRITE);
- runtime = mtime_since(&td->epoch, &td->end_time);
+ runtime = ts->total_run_time;
if (runtime) {
double runt = (double) runtime;
- usr_cpu = (double) td->ts->usr_time * 100 / runt;
- sys_cpu = (double) td->ts->sys_time * 100 / runt;
+ usr_cpu = (double) ts->usr_time * 100 / runt;
+ sys_cpu = (double) ts->sys_time * 100 / runt;
} else {
usr_cpu = 0;
sys_cpu = 0;
}
- fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ts->ctx);
+ fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, ts->ctx);
- /*
- * Do depth distribution calculations
- */
- for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
- io_u_dist[i] = (double) td->ts->io_u_map[i] / (double) td->ts->total_io_u;
- io_u_dist[i] *= 100.0;
- }
+ stat_calc_dist(ts, io_u_dist);
+ stat_calc_lat(ts, io_u_lat);
fprintf(f_out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);
- /*
- * Do latency distribution calculations
- */
- for (i = 0; i < FIO_IO_U_LAT_NR; i++) {
- io_u_lat[i] = (double) td->ts->io_u_lat[i] / (double) td->ts->total_io_u;
- io_u_lat[i] *= 100.0;
- }
-
fprintf(f_out, " lat (msec): 2=%3.1f%%, 4=%3.1f%%, 10=%3.1f%%, 20=%3.1f%%, 50=%3.1f%%, 100=%3.1f%%\n", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
fprintf(f_out, " lat (msec): 250=%3.1f%%, 500=%3.1f%%, 750=%3.1f%%, 1000=%3.1f%%, >=2000=%3.1f%%\n", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);
-
- if (td->description)
- fprintf(f_out, "%s\n", td->description);
}
-static void show_ddir_status_terse(struct thread_data *td,
+static void show_ddir_status_terse(struct thread_stat *ts,
struct group_run_stats *rs, int ddir)
{
- struct thread_stat *ts = td->ts;
unsigned long min, max;
unsigned long long bw;
double mean, dev;
bw = 0;
- if (td->runtime[ddir])
- bw = td->io_bytes[ddir] / td->runtime[ddir];
+ if (ts->runtime[ddir])
+ bw = ts->io_bytes[ddir] / ts->runtime[ddir];
- fprintf(f_out, ",%llu,%llu,%lu", td->io_bytes[ddir] >> 10, bw, td->runtime[ddir]);
+ fprintf(f_out, ";%llu;%llu;%lu", ts->io_bytes[ddir] >> 10, bw, ts->runtime[ddir]);
if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
- fprintf(f_out, ",%lu,%lu,%f,%f", min, max, mean, dev);
+ fprintf(f_out, ";%lu;%lu;%f;%f", min, max, mean, dev);
else
- fprintf(f_out, ",%lu,%lu,%f,%f", 0UL, 0UL, 0.0, 0.0);
+ fprintf(f_out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);
if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
- fprintf(f_out, ",%lu,%lu,%f,%f", min, max, mean, dev);
+ fprintf(f_out, ";%lu;%lu;%f;%f", min, max, mean, dev);
else
- fprintf(f_out, ",%lu,%lu,%f,%f", 0UL, 0UL, 0.0, 0.0);
+ fprintf(f_out, ";%lu;%lu;%f;%f", 0UL, 0UL, 0.0, 0.0);
if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
double p_of_agg;
p_of_agg = mean * 100 / (double) rs->agg[ddir];
- fprintf(f_out, ",%lu,%lu,%f%%,%f,%f", min, max, p_of_agg, mean, dev);
+ fprintf(f_out, ";%lu;%lu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
} else
- fprintf(f_out, ",%lu,%lu,%f%%,%f,%f", 0UL, 0UL, 0.0, 0.0, 0.0);
-
+ fprintf(f_out, ";%lu;%lu;%f%%;%f;%f", 0UL, 0UL, 0.0, 0.0, 0.0);
}
-static void show_thread_status_terse(struct thread_data *td,
+static void show_thread_status_terse(struct thread_stat *ts,
struct group_run_stats *rs)
{
+ double io_u_dist[FIO_IO_U_MAP_NR];
+ double io_u_lat[FIO_IO_U_LAT_NR];
double usr_cpu, sys_cpu;
- fprintf(f_out, "%s,%d,%d",td->name, td->groupid, td->error);
+ fprintf(f_out, "%s;%d;%d", ts->name, ts->groupid, ts->error);
- show_ddir_status_terse(td, rs, 0);
- show_ddir_status_terse(td, rs, 1);
+ show_ddir_status_terse(ts, rs, 0);
+ show_ddir_status_terse(ts, rs, 1);
- if (td->runtime[0] + td->runtime[1]) {
- double runt = (double) (td->runtime[0] + td->runtime[1]);
+ if (ts->total_run_time) {
+ double runt = (double) ts->total_run_time;
- usr_cpu = (double) td->ts->usr_time * 100 / runt;
- sys_cpu = (double) td->ts->sys_time * 100 / runt;
+ usr_cpu = (double) ts->usr_time * 100 / runt;
+ sys_cpu = (double) ts->sys_time * 100 / runt;
} else {
usr_cpu = 0;
sys_cpu = 0;
}
- fprintf(f_out, ",%f%%,%f%%,%lu\n", usr_cpu, sys_cpu, td->ts->ctx);
+ fprintf(f_out, ";%f%%;%f%%;%lu", usr_cpu, sys_cpu, ts->ctx);
+
+ stat_calc_dist(ts, io_u_dist);
+ stat_calc_lat(ts, io_u_lat);
+
+ fprintf(f_out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);
+
+ fprintf(f_out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%\n", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
+ fprintf(f_out, ";%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%;%3.1f%%", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);
+
+ if (ts->description)
+ fprintf(f_out, ";%s", ts->description);
+
+ fprintf(f_out, "\n");
+}
+
+/*
+ * Merge one job's io_stat (src) into the group aggregate (dst) for
+ * group reporting. nr is the 1-based index of this job within the
+ * group: the first job simply copies mean/S, later jobs fold theirs in.
+ * min/max/samples combine exactly; mean and S are combined as a running
+ * average, which is an approximation rather than an exact pooled
+ * mean/variance — the comment below acknowledges as much.
+ */
+static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
+{
+	double mean, S;
+
+	dst->min_val = min(dst->min_val, src->min_val);
+	dst->max_val = max(dst->max_val, src->max_val);
+	dst->samples += src->samples;
+
+	/*
+	 * Needs a new method for calculating stddev, we cannot just
+	 * average them as we do below for nr > 1
+	 */
+	if (nr == 1) {
+		mean = src->mean;
+		S = src->S;
+	} else {
+		mean = ((src->mean * (double) (nr - 1)) + dst->mean) / ((double) nr);
+		S = ((src->S * (double) (nr - 1)) + dst->S) / ((double) nr);
+	}
+
+	dst->mean = mean;
+	dst->S = S;
+}
void show_run_stats(void)
{
struct group_run_stats *runstats, *rs;
struct thread_data *td;
- int i;
+ struct thread_stat *threadstats, *ts;
+ int i, j, k, l, nr_ts, last_ts, idx;
runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));
rs->min_bw[1] = rs->min_run[1] = ~0UL;
}
+ /*
+ * find out how many threads stats we need. if group reporting isn't
+ * enabled, it's one-per-td.
+ */
+ nr_ts = 0;
+ last_ts = -1;
+ for_each_td(td, i) {
+ if (!td->group_reporting) {
+ nr_ts++;
+ continue;
+ }
+ if (last_ts == td->groupid)
+ continue;
+
+ last_ts = td->groupid;
+ nr_ts++;
+ }
+
+ threadstats = malloc(nr_ts * sizeof(struct thread_stat));
+
+ for (i = 0; i < nr_ts; i++) {
+ ts = &threadstats[i];
+
+ memset(ts, 0, sizeof(*ts));
+ for (j = 0; j <= DDIR_WRITE; j++) {
+ ts->clat_stat[j].min_val = -1UL;
+ ts->slat_stat[j].min_val = -1UL;
+ ts->bw_stat[j].min_val = -1UL;
+ }
+ ts->groupid = -1;
+ }
+
+ j = 0;
+ last_ts = -1;
+ idx = 0;
for_each_td(td, i) {
- unsigned long long rbw, wbw;
-
- rs = &runstats[td->groupid];
-
- if (td->runtime[0] < rs->min_run[0] || !rs->min_run[0])
- rs->min_run[0] = td->runtime[0];
- if (td->runtime[0] > rs->max_run[0])
- rs->max_run[0] = td->runtime[0];
- if (td->runtime[1] < rs->min_run[1] || !rs->min_run[1])
- rs->min_run[1] = td->runtime[1];
- if (td->runtime[1] > rs->max_run[1])
- rs->max_run[1] = td->runtime[1];
-
- rbw = wbw = 0;
- if (td->runtime[0])
- rbw = td->io_bytes[0] / (unsigned long long) td->runtime[0];
- if (td->runtime[1])
- wbw = td->io_bytes[1] / (unsigned long long) td->runtime[1];
-
- if (rbw < rs->min_bw[0])
- rs->min_bw[0] = rbw;
- if (wbw < rs->min_bw[1])
- rs->min_bw[1] = wbw;
- if (rbw > rs->max_bw[0])
- rs->max_bw[0] = rbw;
- if (wbw > rs->max_bw[1])
- rs->max_bw[1] = wbw;
-
- rs->io_kb[0] += td->io_bytes[0] >> 10;
- rs->io_kb[1] += td->io_bytes[1] >> 10;
+ if (idx && (!td->group_reporting ||
+ (td->group_reporting && last_ts != td->groupid))) {
+ idx = 0;
+ j++;
+ }
+
+ last_ts = td->groupid;
+
+ ts = &threadstats[j];
+
+ idx++;
+ ts->members++;
+
+ if (ts->groupid == -1) {
+ /*
+ * These are per-group shared already
+ */
+ ts->name = td->name;
+ ts->description = td->description;
+ ts->groupid = td->groupid;
+
+ /*
+ * first pid in group, not very useful...
+ */
+ ts->pid = td->pid;
+ }
+
+ if (td->error && !ts->error) {
+ ts->error = td->error;
+ ts->verror = td->verror;
+ }
+
+ for (l = 0; l <= DDIR_WRITE; l++) {
+ sum_stat(&ts->clat_stat[l], &td->ts.clat_stat[l], idx);
+ sum_stat(&ts->slat_stat[l], &td->ts.slat_stat[l], idx);
+ sum_stat(&ts->bw_stat[l], &td->ts.bw_stat[l], idx);
+
+ ts->stat_io_bytes[l] += td->ts.stat_io_bytes[l];
+ ts->io_bytes[l] += td->ts.io_bytes[l];
+
+ if (ts->runtime[l] < td->ts.runtime[l])
+ ts->runtime[l] = td->ts.runtime[l];
+ }
+
+ ts->usr_time += td->ts.usr_time;
+ ts->sys_time += td->ts.sys_time;
+ ts->ctx += td->ts.ctx;
+
+ for (k = 0; k < FIO_IO_U_MAP_NR; k++)
+ ts->io_u_map[k] += td->ts.io_u_map[k];
+ for (k = 0; k < FIO_IO_U_LAT_NR; k++)
+ ts->io_u_lat[k] += td->ts.io_u_lat[k];
+
+ for (k = 0; k <= DDIR_WRITE; k++)
+ ts->total_io_u[k] += td->ts.total_io_u[k];
+
+ ts->total_run_time += td->ts.total_run_time;
+ }
+
+ for (i = 0; i < nr_ts; i++) {
+ unsigned long long bw;
+
+ ts = &threadstats[i];
+ rs = &runstats[ts->groupid];
+
+ for (j = 0; j <= DDIR_WRITE; j++) {
+ if (!ts->runtime[j])
+ continue;
+ if (ts->runtime[j] < rs->min_run[j] || !rs->min_run[j])
+ rs->min_run[j] = ts->runtime[j];
+ if (ts->runtime[j] > rs->max_run[j])
+ rs->max_run[j] = ts->runtime[j];
+
+ bw = 0;
+ if (ts->runtime[j])
+ bw = ts->io_bytes[j] / (unsigned long long) ts->runtime[j];
+ if (bw < rs->min_bw[j])
+ rs->min_bw[j] = bw;
+ if (bw > rs->max_bw[j])
+ rs->max_bw[j] = bw;
+
+ rs->io_kb[j] += ts->io_bytes[j] >> 10;
+ }
}
for (i = 0; i < groupid + 1; i++) {
if (!terse_output)
printf("\n");
- for_each_td(td, i) {
- rs = &runstats[td->groupid];
+ for (i = 0; i < nr_ts; i++) {
+ ts = &threadstats[i];
+ rs = &runstats[ts->groupid];
if (terse_output)
- show_thread_status_terse(td, rs);
+ show_thread_status_terse(ts, rs);
else
- show_thread_status(td, rs);
+ show_thread_status(ts, rs);
}
if (!terse_output) {
}
free(runstats);
+ free(threadstats);
}
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
{
double val = data;
- double delta, n;
+ double delta;
if (data > is->max_val)
is->max_val = data;
is->min_val = data;
delta = val - is->mean;
- n = is->samples + 1.0;
- is->mean += delta / n;
+ is->mean += delta / (is->samples + 1.0);
is->S += delta * (val - is->mean);
is->samples++;
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long msec)
{
- struct thread_stat *ts = td->ts;
+ struct thread_stat *ts = &td->ts;
add_stat_sample(&ts->clat_stat[ddir], msec);
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long msec)
{
- struct thread_stat *ts = td->ts;
+ struct thread_stat *ts = &td->ts;
add_stat_sample(&ts->slat_stat[ddir], msec);
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir,
struct timeval *t)
{
- struct thread_stat *ts = td->ts;
+ struct thread_stat *ts = &td->ts;
unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
unsigned long rate;