static struct itimerval itimer;
static struct list_head disk_list = LIST_HEAD_INIT(disk_list);
+static dev_t last_dev;
+
/*
 * Convert a number to a short human readable string with a 2^10 based
 * size postfix (e.g. "128K", "1M"). Rounds to nearest on each divide,
 * so a small carry rounding error is possible. The returned buffer is
 * heap allocated; the caller owns it and must free() it. Returns NULL
 * on allocation failure.
 */
static char *num2str(unsigned long num, int maxlen, int base)
{
	/*
	 * could be passed in for 10^3 base, but every caller expects
	 * 2^10 base right now.
	 */
	const unsigned int thousand = 1024;
	/*
	 * Fixed: the original table was missing 'T', so terabyte sized
	 * values were mislabeled as 'P'.
	 */
	const char postfix[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	char *buf;
	int i;

	buf = malloc(128);
	if (!buf)
		return NULL;

	/* scale the starting postfix index by the given base */
	for (i = 0; base > 1; i++)
		base /= thousand;

	/*
	 * Fixed: loop while i is a valid postfix index; the original
	 * "i <= 5" bound allowed an out-of-bounds read of postfix[5]
	 * on the old 5-element table.
	 */
	do {
		int len, carry = 0;

		len = sprintf(buf, "%'lu", num);
		if (len <= maxlen) {
			buf[len] = postfix[i];
			buf[len + 1] = '\0';
			return buf;
		}

		/* round to nearest instead of truncating */
		if ((num % thousand) >= (thousand / 2))
			carry = 1;

		num /= thousand;
		num += carry;
		i++;
	} while (i < (int) sizeof(postfix));

	/* number too large for any postfix; return the bare digits */
	return buf;
}
static int get_io_ticks(struct disk_util *du, struct disk_util_stat *dus)
{
dev_t dev;
char *p;
- if (!td->do_disk_util)
+ if (!td->do_disk_util || (td->io_ops->flags & FIO_DISKLESSIO))
return;
/*
if (disk_util_exists(dev))
return;
+
+ /*
+ * for an fs without a device, we will repeatedly stat through
+ * sysfs which can take oodles of time for thousands of files. so
+ * cache the last lookup and compare with that before going through
+ * everything again.
+ */
+ if (dev == last_dev)
+ return;
+
+ last_dev = dev;
sprintf(foo, "/sys/block");
if (!find_block_dir(dev, foo))
void update_rusage_stat(struct thread_data *td)
{
- getrusage(RUSAGE_SELF, &td->ru_end);
+ struct thread_stat *ts = &td->ts;
- td->usr_time += mtime_since(&td->ru_start.ru_utime, &td->ru_end.ru_utime);
- td->sys_time += mtime_since(&td->ru_start.ru_stime, &td->ru_end.ru_stime);
- td->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
+ getrusage(RUSAGE_SELF, &ts->ru_end);
+
+ ts->usr_time += mtime_since(&ts->ru_start.ru_utime, &ts->ru_end.ru_utime);
+ ts->sys_time += mtime_since(&ts->ru_start.ru_stime, &ts->ru_end.ru_stime);
+ ts->ctx += ts->ru_end.ru_nvcsw + ts->ru_end.ru_nivcsw - (ts->ru_start.ru_nvcsw + ts->ru_start.ru_nivcsw);
- memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
+ memcpy(&ts->ru_start, &ts->ru_end, sizeof(ts->ru_end));
}
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
static void show_group_stats(struct group_run_stats *rs, int id)
{
+ char *p1, *p2, *p3, *p4;
+ const char *ddir_str[] = { " READ", " WRITE" };
+ int i;
+
fprintf(f_out, "\nRun status group %d (all jobs):\n", id);
- if (rs->max_run[DDIR_READ])
- fprintf(f_out, " READ: io=%lluMiB, aggrb=%llu, minb=%llu, maxb=%llu, mint=%llumsec, maxt=%llumsec\n", rs->io_kb[0] >> 10, rs->agg[0], rs->min_bw[0], rs->max_bw[0], rs->min_run[0], rs->max_run[0]);
- if (rs->max_run[DDIR_WRITE])
- fprintf(f_out, " WRITE: io=%lluMiB, aggrb=%llu, minb=%llu, maxb=%llu, mint=%llumsec, maxt=%llumsec\n", rs->io_kb[1] >> 10, rs->agg[1], rs->min_bw[1], rs->max_bw[1], rs->min_run[1], rs->max_run[1]);
+ for (i = 0; i <= DDIR_WRITE; i++) {
+ if (!rs->max_run[i])
+ continue;
+
+ p1 = num2str(rs->io_kb[i], 6, 1);
+ p2 = num2str(rs->agg[i], 6, 1);
+ p3 = num2str(rs->min_bw[i], 6, 1);
+ p4 = num2str(rs->max_bw[i], 6, 1);
+
+ fprintf(f_out, "%s: io=%siB, aggrb=%siB/s, minb=%siB/s, maxb=%siB/s, mint=%llumsec, maxt=%llumsec\n", ddir_str[i], p1, p2, p3, p4, rs->min_run[0], rs->max_run[0]);
+
+ free(p1);
+ free(p2);
+ free(p3);
+ free(p4);
+ }
}
static void show_disk_util(void)
}
}
-static void show_ddir_status(struct thread_data *td, struct group_run_stats *rs,
+static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
int ddir)
{
const char *ddir_str[] = { "read ", "write" };
unsigned long min, max;
unsigned long long bw;
double mean, dev;
+ char *io_p, *bw_p;
- if (!td->runtime[ddir])
+ if (!ts->runtime[ddir])
return;
- bw = td->io_bytes[ddir] / td->runtime[ddir];
- fprintf(f_out, " %s: io=%6lluMiB, bw=%6lluKiB/s, runt=%6lumsec\n", ddir_str[ddir], td->io_bytes[ddir] >> 20, bw, td->runtime[ddir]);
+ bw = ts->io_bytes[ddir] / ts->runtime[ddir];
+ io_p = num2str(ts->io_bytes[ddir] >> 10, 6, 1);
+ bw_p = num2str(bw, 6, 1);
+
+ fprintf(f_out, " %s: io=%siB, bw=%siB/s, runt=%6lumsec\n", ddir_str[ddir], io_p, bw_p, ts->runtime[ddir]);
- if (calc_lat(&td->slat_stat[ddir], &min, &max, &mean, &dev))
+ free(io_p);
+ free(bw_p);
+
+ if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, " slat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);
- if (calc_lat(&td->clat_stat[ddir], &min, &max, &mean, &dev))
+ if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, " clat (msec): min=%5lu, max=%5lu, avg=%5.02f, stdev=%5.02f\n", min, max, mean, dev);
- if (calc_lat(&td->bw_stat[ddir], &min, &max, &mean, &dev)) {
+ if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
double p_of_agg;
p_of_agg = mean * 100 / (double) rs->agg[ddir];
}
}
-static void show_thread_status(struct thread_data *td,
+static void show_thread_status(struct thread_stat *ts,
struct group_run_stats *rs)
{
double usr_cpu, sys_cpu;
unsigned long runtime;
double io_u_dist[FIO_IO_U_MAP_NR];
+ double io_u_lat[FIO_IO_U_LAT_NR];
int i;
- if (!(td->io_bytes[0] + td->io_bytes[1]) && !td->error)
+ if (!(ts->io_bytes[0] + ts->io_bytes[1]))
return;
- fprintf(f_out, "%s: (groupid=%d): err=%2d:\n",td->name, td->groupid, td->error);
+ if (!ts->error)
+ fprintf(f_out, "%s: (groupid=%d): err=%2d: pid=%d\n", ts->name, ts->groupid, ts->error, ts->pid);
+ else
+ fprintf(f_out, "%s: (groupid=%d): err=%2d (%s): pid=%d\n", ts->name, ts->groupid, ts->error, ts->verror, ts->pid);
- show_ddir_status(td, rs, td->ddir);
- if (td->io_bytes[td->ddir ^ 1])
- show_ddir_status(td, rs, td->ddir ^ 1);
+ if (ts->io_bytes[DDIR_READ])
+ show_ddir_status(rs, ts, DDIR_READ);
+ if (ts->io_bytes[DDIR_WRITE])
+ show_ddir_status(rs, ts, DDIR_WRITE);
- runtime = mtime_since(&td->epoch, &td->end_time);
+ runtime = ts->total_run_time;
if (runtime) {
double runt = (double) runtime;
- usr_cpu = (double) td->usr_time * 100 / runt;
- sys_cpu = (double) td->sys_time * 100 / runt;
+ usr_cpu = (double) ts->usr_time * 100 / runt;
+ sys_cpu = (double) ts->sys_time * 100 / runt;
} else {
usr_cpu = 0;
sys_cpu = 0;
}
- fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, td->ctx);
+ fprintf(f_out, " cpu : usr=%3.2f%%, sys=%3.2f%%, ctx=%lu\n", usr_cpu, sys_cpu, ts->ctx);
/*
* Do depth distribution calculations
*/
for (i = 0; i < FIO_IO_U_MAP_NR; i++) {
- io_u_dist[i] = (double) td->io_u_map[i] / (double) td->total_io_u;
+ io_u_dist[i] = (double) ts->io_u_map[i] / (double) ts->total_io_u;
io_u_dist[i] *= 100.0;
}
- fprintf(f_out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%, 32=%3.1f%%, >32=%3.1f%%\n", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);
+ fprintf(f_out, " IO depths : 1=%3.1f%%, 2=%3.1f%%, 4=%3.1f%%, 8=%3.1f%%, 16=%3.1f%%, 32=%3.1f%%, >=64=%3.1f%%\n", io_u_dist[0], io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]);
+
+ /*
+ * Do latency distribution calculations
+ */
+ for (i = 0; i < FIO_IO_U_LAT_NR; i++) {
+ io_u_lat[i] = (double) ts->io_u_lat[i] / (double) ts->total_io_u;
+ io_u_lat[i] *= 100.0;
+ }
+
+ fprintf(f_out, " lat (msec): 2=%3.1f%%, 4=%3.1f%%, 10=%3.1f%%, 20=%3.1f%%, 50=%3.1f%%, 100=%3.1f%%\n", io_u_lat[0], io_u_lat[1], io_u_lat[2], io_u_lat[3], io_u_lat[4], io_u_lat[5]);
+ fprintf(f_out, " lat (msec): 250=%3.1f%%, 500=%3.1f%%, 750=%3.1f%%, 1000=%3.1f%%, >=2000=%3.1f%%\n", io_u_lat[6], io_u_lat[7], io_u_lat[8], io_u_lat[9], io_u_lat[10]);
+
+ if (ts->description)
+ fprintf(f_out, "%s\n", ts->description);
}
-static void show_ddir_status_terse(struct thread_data *td,
+static void show_ddir_status_terse(struct thread_stat *ts,
struct group_run_stats *rs, int ddir)
{
unsigned long min, max;
double mean, dev;
bw = 0;
- if (td->runtime[ddir])
- bw = td->io_bytes[ddir] / td->runtime[ddir];
+ if (ts->runtime[ddir])
+ bw = ts->io_bytes[ddir] / ts->runtime[ddir];
- fprintf(f_out, ",%llu,%llu,%lu", td->io_bytes[ddir] >> 10, bw, td->runtime[ddir]);
+ fprintf(f_out, ",%llu,%llu,%lu", ts->io_bytes[ddir] >> 10, bw, ts->runtime[ddir]);
- if (calc_lat(&td->slat_stat[ddir], &min, &max, &mean, &dev))
+ if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, ",%lu,%lu,%f,%f", min, max, mean, dev);
else
fprintf(f_out, ",%lu,%lu,%f,%f", 0UL, 0UL, 0.0, 0.0);
- if (calc_lat(&td->clat_stat[ddir], &min, &max, &mean, &dev))
+ if (calc_lat(&ts->clat_stat[ddir], &min, &max, &mean, &dev))
fprintf(f_out, ",%lu,%lu,%f,%f", min, max, mean, dev);
else
fprintf(f_out, ",%lu,%lu,%f,%f", 0UL, 0UL, 0.0, 0.0);
- if (calc_lat(&td->bw_stat[ddir], &min, &max, &mean, &dev)) {
+ if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
double p_of_agg;
p_of_agg = mean * 100 / (double) rs->agg[ddir];
fprintf(f_out, ",%lu,%lu,%f%%,%f,%f", min, max, p_of_agg, mean, dev);
} else
fprintf(f_out, ",%lu,%lu,%f%%,%f,%f", 0UL, 0UL, 0.0, 0.0, 0.0);
-
}
-static void show_thread_status_terse(struct thread_data *td,
+static void show_thread_status_terse(struct thread_stat *ts,
struct group_run_stats *rs)
{
double usr_cpu, sys_cpu;
- fprintf(f_out, "%s,%d,%d",td->name, td->groupid, td->error);
+ fprintf(f_out, "%s,%d,%d", ts->name, ts->groupid, ts->error);
- show_ddir_status_terse(td, rs, 0);
- show_ddir_status_terse(td, rs, 1);
+ show_ddir_status_terse(ts, rs, 0);
+ show_ddir_status_terse(ts, rs, 1);
- if (td->runtime[0] + td->runtime[1]) {
- double runt = (double) (td->runtime[0] + td->runtime[1]);
+ if (ts->total_run_time) {
+ double runt = (double) ts->total_run_time;
- usr_cpu = (double) td->usr_time * 100 / runt;
- sys_cpu = (double) td->sys_time * 100 / runt;
+ usr_cpu = (double) ts->usr_time * 100 / runt;
+ sys_cpu = (double) ts->sys_time * 100 / runt;
} else {
usr_cpu = 0;
sys_cpu = 0;
}
- fprintf(f_out, ",%f%%,%f%%,%lu\n", usr_cpu, sys_cpu, td->ctx);
+ fprintf(f_out, ",%f%%,%f%%,%lu\n", usr_cpu, sys_cpu, ts->ctx);
+}
+
+static void __sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
+{
+ double mean, S;
+
+ dst->min_val = min(dst->min_val, src->min_val);
+ dst->max_val = max(dst->max_val, src->max_val);
+ dst->samples += src->samples;
+
+ /*
+ * Needs a new method for calculating stddev, we cannot just
+ * average them we do below for nr > 1
+ */
+ if (nr == 1) {
+ mean = src->mean;
+ S = src->S;
+ } else {
+ mean = ((src->mean * (double) (nr - 1)) + dst->mean) / ((double) nr);
+ S = ((src->S * (double) (nr - 1)) + dst->S) / ((double) nr);
+ }
+
+ dst->mean = mean;
+ dst->S = S;
+}
+
+static void sum_stat(struct io_stat *dst, struct io_stat *src, int nr)
+{
+ __sum_stat(&dst[DDIR_READ], &src[DDIR_READ], nr);
+ __sum_stat(&dst[DDIR_WRITE], &src[DDIR_WRITE], nr);
}
void show_run_stats(void)
{
struct group_run_stats *runstats, *rs;
struct thread_data *td;
- int i;
+ struct thread_stat *threadstats, *ts;
+ int i, j, k, nr_ts, last_ts, members;
runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1));
rs->min_bw[1] = rs->min_run[1] = ~0UL;
}
+ /*
+ * find out how many threads stats we need. if group reporting isn't
+ * enabled, it's one-per-td.
+ */
+ nr_ts = 0;
+ last_ts = -1;
for_each_td(td, i) {
- unsigned long long rbw, wbw;
+ if (!td->group_reporting) {
+ nr_ts++;
+ continue;
+ }
+ if (last_ts == td->groupid)
+ continue;
+
+ last_ts = td->groupid;
+ nr_ts++;
+ }
+
+ threadstats = malloc(nr_ts * sizeof(struct thread_stat));
+
+ for (i = 0; i < nr_ts; i++) {
+ ts = &threadstats[i];
+
+ memset(ts, 0, sizeof(*ts));
+ ts->clat_stat[0].min_val = -1UL;
+ ts->clat_stat[1].min_val = -1UL;
+ ts->slat_stat[0].min_val = -1UL;
+ ts->slat_stat[1].min_val = -1UL;
+ ts->bw_stat[0].min_val = -1UL;
+ ts->bw_stat[1].min_val = -1UL;
+ }
+
+ j = 0;
+ last_ts = -1;
+ members = 0;
+ for_each_td(td, i) {
+ ts = &threadstats[j];
+
+ members++;
+
+ if (!ts->groupid) {
+ ts->name = td->name;
+ ts->description = td->description;
+ ts->error = td->error;
+ ts->groupid = td->groupid;
+ ts->pid = td->pid;
+ ts->verror = td->verror;
+ }
+
+ sum_stat(ts->clat_stat, td->ts.clat_stat, members);
+ sum_stat(ts->slat_stat, td->ts.slat_stat, members);
+ sum_stat(ts->bw_stat, td->ts.bw_stat, members);
+
+ ts->stat_io_bytes[0] += td->ts.stat_io_bytes[0];
+ ts->stat_io_bytes[1] += td->ts.stat_io_bytes[1];
+
+ ts->usr_time += td->ts.usr_time;
+ ts->sys_time += td->ts.sys_time;
+ ts->ctx += td->ts.ctx;
+
+ for (k = 0; k < FIO_IO_U_MAP_NR; k++)
+ ts->io_u_map[k] += td->ts.io_u_map[k];
+ for (k = 0; k < FIO_IO_U_LAT_NR; k++)
+ ts->io_u_lat[k] += td->ts.io_u_lat[k];
+
+ ts->total_io_u += td->ts.total_io_u;
+ ts->io_bytes[0] += td->ts.io_bytes[0];
+ ts->io_bytes[1] += td->ts.io_bytes[1];
+
+ if (ts->runtime[0] < td->ts.runtime[0])
+ ts->runtime[0] = td->ts.runtime[0];
+ if (ts->runtime[1] < td->ts.runtime[1])
+ ts->runtime[1] = td->ts.runtime[1];
- if (td->error) {
- fprintf(f_out, "%s: %s\n", td->name, td->verror);
+ ts->total_run_time += td->ts.total_run_time;
+
+ if (!td->group_reporting) {
+ members = 0;
+ j++;
+ continue;
+ }
+ if (last_ts == td->groupid)
continue;
+
+ if (last_ts != -1) {
+ members = 0;
+ j++;
}
- rs = &runstats[td->groupid];
+ last_ts = td->groupid;
+ }
+
+ for (i = 0; i < nr_ts; i++) {
+ unsigned long long rbw, wbw;
+
+ ts = &threadstats[i];
+ rs = &runstats[ts->groupid];
- if (td->runtime[0] < rs->min_run[0] || !rs->min_run[0])
- rs->min_run[0] = td->runtime[0];
- if (td->runtime[0] > rs->max_run[0])
- rs->max_run[0] = td->runtime[0];
- if (td->runtime[1] < rs->min_run[1] || !rs->min_run[1])
- rs->min_run[1] = td->runtime[1];
- if (td->runtime[1] > rs->max_run[1])
- rs->max_run[1] = td->runtime[1];
+ if (ts->runtime[0] < rs->min_run[0] || !rs->min_run[0])
+ rs->min_run[0] = ts->runtime[0];
+ if (ts->runtime[0] > rs->max_run[0])
+ rs->max_run[0] = ts->runtime[0];
+ if (ts->runtime[1] < rs->min_run[1] || !rs->min_run[1])
+ rs->min_run[1] = ts->runtime[1];
+ if (ts->runtime[1] > rs->max_run[1])
+ rs->max_run[1] = ts->runtime[1];
rbw = wbw = 0;
- if (td->runtime[0])
- rbw = td->io_bytes[0] / (unsigned long long) td->runtime[0];
- if (td->runtime[1])
- wbw = td->io_bytes[1] / (unsigned long long) td->runtime[1];
+ if (ts->runtime[0])
+ rbw = ts->io_bytes[0] / (unsigned long long) ts->runtime[0];
+ if (ts->runtime[1])
+ wbw = ts->io_bytes[1] / (unsigned long long) ts->runtime[1];
if (rbw < rs->min_bw[0])
rs->min_bw[0] = rbw;
if (wbw > rs->max_bw[1])
rs->max_bw[1] = wbw;
- rs->io_kb[0] += td->io_bytes[0] >> 10;
- rs->io_kb[1] += td->io_bytes[1] >> 10;
+ rs->io_kb[0] += ts->io_bytes[0] >> 10;
+ rs->io_kb[1] += ts->io_bytes[1] >> 10;
}
for (i = 0; i < groupid + 1; i++) {
if (!terse_output)
printf("\n");
- for_each_td(td, i) {
- rs = &runstats[td->groupid];
+ for (i = 0; i < nr_ts; i++) {
+ ts = &threadstats[i];
+ rs = &runstats[ts->groupid];
if (terse_output)
- show_thread_status_terse(td, rs);
+ show_thread_status_terse(ts, rs);
else
- show_thread_status(td, rs);
+ show_thread_status(ts, rs);
}
if (!terse_output) {
}
free(runstats);
+ free(threadstats);
}
static inline void add_stat_sample(struct io_stat *is, unsigned long data)
is->samples++;
}
-static void add_log_sample(struct thread_data *td, struct io_log *iolog,
- unsigned long val, enum fio_ddir ddir)
+static void __add_log_sample(struct io_log *iolog, unsigned long val,
+ enum fio_ddir ddir, unsigned long time)
{
if (iolog->nr_samples == iolog->max_samples) {
int new_size = sizeof(struct io_sample) * iolog->max_samples*2;
}
iolog->log[iolog->nr_samples].val = val;
- iolog->log[iolog->nr_samples].time = mtime_since_now(&td->epoch);
+ iolog->log[iolog->nr_samples].time = time;
iolog->log[iolog->nr_samples].ddir = ddir;
iolog->nr_samples++;
}
+static void add_log_sample(struct thread_data *td, struct io_log *iolog,
+ unsigned long val, enum fio_ddir ddir)
+{
+ __add_log_sample(iolog, val, ddir, mtime_since_now(&td->epoch));
+}
+
+void add_agg_sample(unsigned long val, enum fio_ddir ddir)
+{
+ struct io_log *iolog = agg_io_log[ddir];
+
+ __add_log_sample(iolog, val, ddir, mtime_since_genesis());
+}
+
/*
 * Record a completion latency sample, and log it when clat logging is
 * enabled for this thread.
 */
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long msec)
{
	add_stat_sample(&td->ts.clat_stat[ddir], msec);

	if (td->ts.clat_log)
		add_log_sample(td, td->ts.clat_log, msec, ddir);
}
/*
 * Record a submission latency sample, and log it when slat logging is
 * enabled for this thread.
 */
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
		     unsigned long msec)
{
	add_stat_sample(&td->ts.slat_stat[ddir], msec);

	if (td->ts.slat_log)
		add_log_sample(td, td->ts.slat_log, msec, ddir);
}
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir,
struct timeval *t)
{
- unsigned long spent = mtime_since(&td->stat_sample_time[ddir], t);
+ struct thread_stat *ts = &td->ts;
+ unsigned long spent = mtime_since(&ts->stat_sample_time[ddir], t);
unsigned long rate;
if (spent < td->bw_avg_time)
return;
- rate = (td->this_io_bytes[ddir] - td->stat_io_bytes[ddir]) / spent;
- add_stat_sample(&td->bw_stat[ddir], rate);
+ rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) / spent;
+ add_stat_sample(&ts->bw_stat[ddir], rate);
- if (td->bw_log)
- add_log_sample(td, td->bw_log, rate, ddir);
+ if (ts->bw_log)
+ add_log_sample(td, ts->bw_log, rate, ddir);
- fio_gettime(&td->stat_sample_time[ddir], NULL);
- td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
+ fio_gettime(&ts->stat_sample_time[ddir], NULL);
+ ts->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}
-
-