static struct itimerval itimer;
static struct list_head disk_list = LIST_HEAD_INIT(disk_list);
+static dev_t last_dev;
+
+/*
+ * Cheasy number->string conversion, complete with carry rounding error.
+ */
+/*
+ * Cheasy number->string conversion, complete with carry rounding error.
+ * Returns a malloc'ed string (caller frees), or NULL if allocation fails.
+ */
+static char *num2str(unsigned long num, int maxlen, int base)
+{
+	/*
+	 * could be passed in for 10^3 base, but every caller expects
+	 * 2^10 base right now.
+	 */
+	const unsigned int thousand = 1024;
+	/* 'T' was missing: terabyte-range values were mislabeled 'P' */
+	const char postfix[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
+	char *buf;
+	int i;
+
+	buf = malloc(128);
+	if (!buf)
+		return NULL;
+
+	for (i = 0; base > 1; i++)
+		base /= thousand;
+
+	do {
+		int len, carry = 0;
+
+		len = sprintf(buf, "%'lu", num);
+		if (len <= maxlen) {
+			buf[len] = postfix[i];
+			buf[len + 1] = '\0';
+			return buf;
+		}
+
+		/* round to nearest on the divide, matching display intent */
+		if ((num % thousand) >= (thousand / 2))
+			carry = 1;
+
+		num /= thousand;
+		num += carry;
+		i++;
+	} while (i < (int) sizeof(postfix));	/* old 'i <= 5' could index postfix[5] OOB */
+
+	/* no postfix fits; return the bare (possibly over-long) number */
+	return buf;
+}
static int get_io_ticks(struct disk_util *du, struct disk_util_stat *dus)
{
dev_t dev;
char *p;
- if (!td->do_disk_util || (td->io_ops->flags & FIO_NETIO))
+ if (!td->do_disk_util || (td->io_ops->flags & FIO_DISKLESSIO))
return;
/*
if (disk_util_exists(dev))
return;
+
+ /*
+ * for an fs without a device, we will repeatedly stat through
+ * sysfs which can take oodles of time for thousands of files. so
+ * cache the last lookup and compare with that before going through
+ * everything again.
+ */
+ if (dev == last_dev)
+ return;
+
+ last_dev = dev;
sprintf(foo, "/sys/block");
if (!find_block_dir(dev, foo))
void update_rusage_stat(struct thread_data *td)
{
-	getrusage(RUSAGE_SELF, &td->ru_end);
+	struct thread_stat *ts = &td->ts;
-	td->usr_time += mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
-	td->sys_time += mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
-	td->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
+	/* snapshot current usage; deltas against ru_start accumulate below */
+	getrusage(RUSAGE_SELF, &ts->ru_end);
+
+	/* user/system CPU time plus voluntary+involuntary context switches */
+	ts->usr_time += mtime_since(&ts->ru_start.ru_utime, &ts->ru_end.ru_utime);
+	ts->sys_time += mtime_since(&ts->ru_start.ru_stime, &ts->ru_end.ru_stime);
+	ts->ctx += ts->ru_end.ru_nvcsw + ts->ru_end.ru_nivcsw - (ts->ru_start.ru_nvcsw + ts->ru_start.ru_nivcsw);
-	memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
+	/* roll the window: the next call measures from this snapshot */
+	memcpy(&ts->ru_start, &ts->ru_end, sizeof(ts->ru_end));
}
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
static void show_group_stats(struct group_run_stats *rs, int id)
{
+	char *p1, *p2, *p3, *p4;
+	const char *ddir_str[] = { " READ", " WRITE" };
+	int i;
+
	fprintf(f_out, "\nRun status group %d (all jobs):\n", id);
-	if (rs->max_run[DDIR_READ])
-		fprintf(f_out, " READ: io=%lluMiB, aggrb=%llu, minb=%llu, maxb=%llu, mint=%llumsec, maxt=%llumsec\n", rs->io_kb[0] >> 10, rs->agg[0], rs->min_bw[0], rs->max_bw[0], rs->min_run[0], rs->max_run[0]);
-	if (rs->max_run[DDIR_WRITE])
-		fprintf(f_out, " WRITE: io=%lluMiB, aggrb=%llu, minb=%llu, maxb=%llu, mint=%llumsec, maxt=%llumsec\n", rs->io_kb[1] >> 10, rs->agg[1], rs->min_bw[1], rs->max_bw[1], rs->min_run[1], rs->max_run[1]);
+	for (i = 0; i <= DDIR_WRITE; i++) {
+		if (!rs->max_run[i])
+			continue;
+
+		/* human-readable io total and aggregate/min/max bandwidth */
+		p1 = num2str(rs->io_kb[i], 6, 1);
+		p2 = num2str(rs->agg[i], 6, 1);
+		p3 = num2str(rs->min_bw[i], 6, 1);
+		p4 = num2str(rs->max_bw[i], 6, 1);
+
+		/* BUGFIX: mint/maxt must use this direction's index i, not [0] */
+		fprintf(f_out, "%s: io=%siB, aggrb=%siB/s, minb=%siB/s, maxb=%siB/s, mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2, p3, p4, rs->min_run[i], rs->max_run[i]);
+
+		free(p1);
+		free(p2);
+		free(p3);
+		free(p4);
+	}
}
static void show_disk_util(void)
int ddir)
{
const char *ddir_str[] = { "read ", "write" };
+ struct thread_stat *ts;
unsigned long min, max;
unsigned long long bw;
double mean, dev;
+ char *io_p, *bw_p;
if (!td->runtime[ddir])
return;
bw = td->io_bytes[ddir] / td->runtime[ddir];
- fprintf(f_out, " %s: io=%6lluMiB, bw=%6lluKiB/s, runt=%6lumsec\n", ddir_str[ddir], td->io_bytes[ddir] >> 20, bw, td->runtime[ddir]);
+ io_p = num2str(td->io_bytes[ddir] >> 10, 6, 1);
+ bw_p = num2str(bw, 6, 1);
- if (calc_lat(&td->slat_stat[ddir], &min, &max, &mean, &dev))
+ fprintf(f_out, " %s: io=%siB, bw=%siB/s, runt=%6lumsec\n", ddir_str[ddir], io_p, bw_p, td->runtime[ddir]);
+
+ free(io_p);
+ free(bw_p);
+
+ ts = &td->ts;
+ if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, " slat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);
- if (calc_lat(&td->clat_stat[ddir], &min, &max, &mean, &dev))
+ if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, " clat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);
- if (calc_lat(&td->bw_stat[ddir], &min, &max, &mean, &dev)) {
+ if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
double p_of_agg;
p_of_agg = mean * 100 / (double) rs->agg[ddir];
double usr_cpu, sys_cpu;
unsigned long runtime;
double io_u_dist[FIO_IO_U_MAP_NR];
+ double io_u_lat[FIO_IO_U_LAT_NR];
int i;
- if (!(td->io_bytes[0] + td->io_bytes[1]) && !td->error)
+ if (!(td->io_bytes[0] + td->io_bytes[1]))
return;
- fprintf(f_out, "%s: (groupid=%d): err=%2d: pid=%d\n",td->name, td->groupid, td->error, td->pid);
+ if (!td->error)
+ fprintf(f_out, "%s: (groupid=%d): err=%2d: pid=%d\n",td->name, td->groupid, td->error, td->pid);
+ else
+ fprintf(f_out, "%s: (groupid=%d): err=%2d (%s): pid=%d\n",td->name, td->groupid, td->error, td->verror, td->pid);
- show_ddir_status(td, rs, td->ddir);
- if (td->io_bytes[td->ddir ^ 1])
- show_ddir_status(td, rs, td->ddir ^ 1);
+ if (td_read(td))
+ show_ddir_status(td, rs, DDIR_READ);
+ if (td_write(td))
+ show_ddir_status(td, rs, DDIR_WRITE);
runtime = mtime_since(&td->epoch, &td->end_time);
if (runtime) {
double runt = (double) runtime;
- usr_cpu = (double) td->usr_time * 100 / runt;
- sys_cpu = (double) td->sys_time * 100 / runt;
+ usr_cpu = (double) td->ts.usr_time * 100 / runt;
+ sys_cpu = (double) td->ts.sys_time * 100 / runt;
} else {
usr_cpu = 0;
sys_cpu = 0;
}
- fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ctx);
+ fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ts.ctx);
/*
* Do depth distribution calculations
fprintf(f_out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);
+ /*
+ * Do latency distribution calculations
+ */
+ for (i = 0; i < FIO_IO_U_LAT_NR; i++) {
+ io_u_lat[i] = (double) td->io_u_lat[i] / (double) td->total_io_u;
+ io_u_lat[i] *= 100.0;
+ }
+
+ fprintf(f_out, " lat (msec): 2=%3.1f%%, 4=%3.1f%%, 10=%3.1f%%, 20=%3.1f%%, 50=%3.1f%%, 100=%3.1f%%\n", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
+ fprintf(f_out, " lat (msec): 250=%3.1f%%, 500=%3.1f%%, 750=%3.1f%%, 1000=%3.1f%%, >=2000=%3.1f%%\n", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);
+
if (td->description)
fprintf(f_out, "%s\n", td->description);
}
static void show_ddir_status_terse(struct thread_data *td,
struct group_run_stats *rs, int ddir)
{
+ struct thread_stat *ts = &td->ts;
unsigned long min, max;
unsigned long long bw;
double mean, dev;
fprintf(f_out, ",%llu,%llu,%lu", td->io_bytes[ddir] >> 10, bw, td->runtime[ddir]);
- if (calc_lat(&td->slat_stat[ddir], &min, &max, &mean, &dev))
+ if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, ",%lu,%lu,%f,%f", min, max, mean, dev);
else
fprintf(f_out, ",%lu,%lu,%f,%f", 0UL, 0UL, 0.0, 0.0);
- if (calc_lat(&td->clat_stat[ddir], &min, &max, &mean, &dev))
+ if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, ",%lu,%lu,%f,%f", min, max, mean, dev);
else
fprintf(f_out, ",%lu,%lu,%f,%f", 0UL, 0UL, 0.0, 0.0);
- if (calc_lat(&td->bw_stat[ddir], &min, &max, &mean, &dev)) {
+ if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
double p_of_agg;
p_of_agg = mean * 100 / (double) rs->agg[ddir];
if (td->runtime[0] + td->runtime[1]) {
double runt = (double) (td->runtime[0] + td->runtime[1]);
- usr_cpu = (double) td->usr_time * 100 / runt;
- sys_cpu = (double) td->sys_time * 100 / runt;
+ usr_cpu = (double) td->ts.usr_time * 100 / runt;
+ sys_cpu = (double) td->ts.sys_time * 100 / runt;
} else {
usr_cpu = 0;
sys_cpu = 0;
}
- fprintf(f_out, ",%f%%,%f%%,%lu\n", usr_cpu, sys_cpu, td->ctx);
+ fprintf(f_out, ",%f%%,%f%%,%lu\n", usr_cpu, sys_cpu, td->ts.ctx);
}
void show_run_stats(void)
for_each_td(td, i) {
unsigned long long rbw, wbw;
- if (td->error) {
- fprintf(f_out, "%s: %s\n", td->name, td->verror);
- continue;
- }
-
rs = &runstats[td->groupid];
if (td->runtime[0] < rs->min_run[0] || !rs->min_run[0])
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long msec)
{
-	add_stat_sample(&td->clat_stat[ddir], msec);
+	struct thread_stat *ts = &td->ts;
+
+	/* fold this completion latency into the per-direction clat stats */
+	add_stat_sample(&ts->clat_stat[ddir], msec);
-	if (td->clat_log)
-		add_log_sample(td, td->clat_log, msec, ddir);
+	/* optionally append to the completion-latency log, if configured */
+	if (ts->clat_log)
+		add_log_sample(td, ts->clat_log, msec, ddir);
}
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long msec)
{
-	add_stat_sample(&td->slat_stat[ddir], msec);
+	struct thread_stat *ts = &td->ts;
+
+	/* fold this submission latency into the per-direction slat stats */
+	add_stat_sample(&ts->slat_stat[ddir], msec);
-	if (td->slat_log)
-		add_log_sample(td, td->slat_log, msec, ddir);
+	/* optionally append to the submission-latency log, if configured */
+	if (ts->slat_log)
+		add_log_sample(td, ts->slat_log, msec, ddir);
}
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir,
struct timeval *t)
{
-	unsigned long spent = mtime_since(&td->stat_sample_time[ddir], t);
+	struct thread_stat *ts = &td->ts;
+	/* msec elapsed since the last bandwidth sample for this direction */
+	unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
unsigned long rate;
if (spent < td->bw_avg_time)
return;
-	rate = (td->this_io_bytes[ddir] - td->stat_io_bytes[ddir]) / spent;
-	add_stat_sample(&td->bw_stat[ddir], rate);
+	/* io done over the window divided by elapsed msec — units follow
+	 * this_io_bytes (presumably bytes/msec; verify against callers) */
+	rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) / spent;
+	add_stat_sample(&ts->bw_stat[ddir], rate);
-	if (td->bw_log)
-		add_log_sample(td, td->bw_log, rate, ddir);
+	/* optionally append to the bandwidth log, if configured */
+	if (ts->bw_log)
+		add_log_sample(td, ts->bw_log, rate, ddir);
-	fio_gettime(&td->stat_sample_time[ddir], NULL);
-	td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
+	/* reset the window start so the next sample measures from here */
+	fio_gettime(&ts->stat_sample_time[ddir], NULL);
+	ts->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}
-
-