{
struct thread_stat *ts = &td->ts;
- getrusage(RUSAGE_SELF, &ts->ru_end);
-
- ts->usr_time += mtime_since(&ts->ru_start.ru_utime,
- &ts->ru_end.ru_utime);
- ts->sys_time += mtime_since(&ts->ru_start.ru_stime,
- &ts->ru_end.ru_stime);
- ts->ctx += ts->ru_end.ru_nvcsw + ts->ru_end.ru_nivcsw
- - (ts->ru_start.ru_nvcsw + ts->ru_start.ru_nivcsw);
- ts->minf += ts->ru_end.ru_minflt - ts->ru_start.ru_minflt;
- ts->majf += ts->ru_end.ru_majflt - ts->ru_start.ru_majflt;
-
- memcpy(&ts->ru_start, &ts->ru_end, sizeof(ts->ru_end));
+ getrusage(RUSAGE_SELF, &td->ru_end);
+
+ ts->usr_time += mtime_since(&td->ru_start.ru_utime,
+ &td->ru_end.ru_utime);
+ ts->sys_time += mtime_since(&td->ru_start.ru_stime,
+ &td->ru_end.ru_stime);
+ ts->ctx += td->ru_end.ru_nvcsw + td->ru_end.ru_nivcsw
+ - (td->ru_start.ru_nvcsw + td->ru_start.ru_nivcsw);
+ ts->minf += td->ru_end.ru_minflt - td->ru_start.ru_minflt;
+ ts->majf += td->ru_end.ru_majflt - td->ru_start.ru_majflt;
+
+ memcpy(&td->ru_start, &td->ru_end, sizeof(td->ru_end));
+}
+
+/*
+ * Given a latency, return the index of the corresponding bucket in
+ * the structure tracking percentiles.
+ *
+ * (1) find the group (and error bits) that the value (latency)
+ * belongs to by looking at its MSB. (2) find the bucket number in the
+ * group by looking at the index bits.
+ *
+ */
+static unsigned int plat_val_to_idx(unsigned int val)
+{
+ unsigned int msb, error_bits, base, offset, idx;
+
+ /* Find MSB starting from bit 0 */
+ if (val == 0)
+ msb = 0;
+ else
+ msb = (sizeof(val)*8) - __builtin_clz(val) - 1;
+
+ /*
+ * MSB <= FIO_IO_U_PLAT_BITS, cannot be rounded off. Use
+ * all bits of the sample as index
+ */
+ if (msb <= FIO_IO_U_PLAT_BITS)
+ return val;
+
+ /* Compute the number of error bits to discard */
+ error_bits = msb - FIO_IO_U_PLAT_BITS;
+
+ /* Compute the number of buckets before the group */
+ base = (error_bits + 1) << FIO_IO_U_PLAT_BITS;
+
+ /*
+ * Discard the error bits and apply the mask to find the
+ * index for the buckets in the group
+ */
+ offset = (FIO_IO_U_PLAT_VAL - 1) & (val >> error_bits);
+
+ /* Make sure the index does not exceed (array size - 1) */
+ idx = (base + offset) < (FIO_IO_U_PLAT_NR - 1)?
+ (base + offset) : (FIO_IO_U_PLAT_NR - 1);
+
+ return idx;
+}
+
+/*
+ * Convert the given index of the bucket array to the value
+ * represented by the bucket
+ */
+static unsigned int plat_idx_to_val(unsigned int idx)
+{
+ unsigned int error_bits, k, base;
+
+ assert(idx < FIO_IO_U_PLAT_NR);
+
+ /* MSB <= FIO_IO_U_PLAT_BITS, cannot be rounded off. Use
+ * all bits of the sample as index */
+ if (idx < (FIO_IO_U_PLAT_VAL << 1) )
+ return idx;
+
+ /* Find the group and compute the minimum value of that group */
+ error_bits = (idx >> FIO_IO_U_PLAT_BITS) -1;
+ base = 1 << (error_bits + FIO_IO_U_PLAT_BITS);
+
+ /* Find the bucket number within the group */
+ k = idx % FIO_IO_U_PLAT_VAL;
+
+ /* Return the mean of the range of the bucket */
+ return base + ((k + 0.5) * (1 << error_bits));
+}
+
+static int double_cmp(const void *a, const void *b)
+{
+ const double fa = *(const double *)a;
+ const double fb = *(const double *)b;
+ int cmp = 0;
+
+ if (fa > fb)
+ cmp = 1;
+ else if (fa < fb)
+ cmp = -1;
+
+ return cmp;
+}
+
+/*
+ * Find and display the p-th percentile of clat
+ */
+static void show_clat_percentiles(unsigned int* io_u_plat, unsigned long nr,
+ double* user_list)
+{
+ unsigned long sum = 0;
+ unsigned int len, i, j = 0;
+ const double *plist;
+ int is_last = 0;
+ static const double def_list[FIO_IO_U_LIST_MAX_LEN] = {
+ 1.0, 5.0, 10.0, 20.0, 30.0,
+ 40.0, 50.0, 60.0, 70.0, 80.0,
+ 90.0, 95.0, 99.0, 99.5, 99.9};
+
+ plist = user_list;
+ if (!plist)
+ plist = def_list;
+
+ for (len = 0; len <FIO_IO_U_LIST_MAX_LEN && plist[len] != 0; len++)
+ ;
+
+ /*
+ * Sort the user-specified list. Note that this does not work
+ * for NaN values
+ */
+ if (user_list && len > 1)
+ qsort((void*)user_list, len, sizeof(user_list[0]), double_cmp);
+
+ log_info(" clat percentiles (usec) :");
+
+ for (i = 0; i < FIO_IO_U_PLAT_NR && !is_last; i++) {
+ sum += io_u_plat[i];
+ while (sum >= (plist[j] / 100 * nr)) {
+ assert(plist[j] <= 100.0);
+
+ /* for formatting */
+ if (j != 0 && (j % 4) == 0)
+ log_info(" ");
+
+ /* end of the list */
+ is_last = (j == len - 1);
+
+ log_info(" %2.2fth=%u%c", plist[j], plat_idx_to_val(i),
+ (is_last? '\n' : ','));
+
+ if (is_last)
+ break;
+
+ if (j % 4 == 3) /* for formatting */
+ log_info("\n");
+ j++;
+ }
+ }
}
static int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max,
return 1;
}
-static void show_group_stats(struct group_run_stats *rs, int id)
+void show_group_stats(struct group_run_stats *rs)
{
char *p1, *p2, *p3, *p4;
const char *ddir_str[] = { " READ", " WRITE" };
int i;
- log_info("\nRun status group %d (all jobs):\n", id);
+ log_info("\nRun status group %d (all jobs):\n", rs->groupid);
for (i = 0; i <= DDIR_WRITE; i++) {
const int i2p = is_power_of_2(rs->kb_base);
io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p);
bw_p = num2str(bw, 6, 1, i2p);
- iops = (1000 * ts->total_io_u[ddir]) / runt;
+ iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
iops_p = num2str(iops, 6, 1, 0);
log_info(" %s: io=%sB, bw=%sB/s, iops=%s, runt=%6llumsec\n",
free(minp);
free(maxp);
}
+ if (ts->clat_percentiles) {
+ show_clat_percentiles(ts->io_u_plat[ddir],
+ ts->clat_stat[ddir].samples,
+ ts->percentile_list);
+ }
if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) {
double p_of_agg;
log_info("\n");
}
-static void show_thread_status(struct thread_stat *ts,
- struct group_run_stats *rs)
+void show_thread_status(struct thread_stat *ts, struct group_run_stats *rs)
{
double usr_cpu, sys_cpu;
unsigned long runtime;
{
double mean, S;
+ if (src->samples == 0)
+ return;
+
dst->min_val = min(dst->min_val, src->min_val);
dst->max_val = max(dst->max_val, src->max_val);
- dst->samples += src->samples;
/*
- * Needs a new method for calculating stddev, we cannot just
- * average them we do below for nr > 1
+ * Compute new mean and S after the merge
+ * <http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
+ * #Parallel_algorithm>
*/
if (nr == 1) {
mean = src->mean;
S = src->S;
} else {
- mean = ((src->mean * (double) (nr - 1))
- + dst->mean) / ((double) nr);
- S = ((src->S * (double) (nr - 1)) + dst->S) / ((double) nr);
+ double delta = src->mean - dst->mean;
+
+ mean = ((src->mean * src->samples) +
+ (dst->mean * dst->samples)) /
+ (dst->samples + src->samples);
+
+ S = src->S + dst->S + pow(delta, 2.0) *
+ (dst->samples * src->samples) /
+ (dst->samples + src->samples);
}
+ dst->samples += src->samples;
dst->mean = mean;
dst->S = S;
}
ts = &threadstats[j];
+ ts->clat_percentiles = td->o.clat_percentiles;
+ if (td->o.overwrite_plist)
+ ts->percentile_list = td->o.percentile_list;
+ else
+ ts->percentile_list = NULL;
+
idx++;
ts->members++;
/*
* These are per-group shared already
*/
- ts->name = td->o.name;
- ts->description = td->o.description;
+ strncpy(ts->name, td->o.name, FIO_JOBNAME_SIZE);
+ if (td->o.description)
+ strncpy(ts->description, td->o.description,
+ FIO_JOBNAME_SIZE);
+ else
+ memset(ts->description, 0, FIO_JOBNAME_SIZE);
+
ts->groupid = td->groupid;
/*
if (!td->error && td->o.continue_on_error &&
td->first_error) {
ts->error = td->first_error;
- ts->verror = td->verror;
+ strcpy(ts->verror, td->verror);
} else if (td->error) {
ts->error = td->error;
- ts->verror = td->verror;
+ strcpy(ts->verror, td->verror);
}
}
sum_stat(&ts->lat_stat[l], &td->ts.lat_stat[l], idx);
sum_stat(&ts->bw_stat[l], &td->ts.bw_stat[l], idx);
- ts->stat_io_bytes[l] += td->ts.stat_io_bytes[l];
ts->io_bytes[l] += td->ts.io_bytes[l];
if (ts->runtime[l] < td->ts.runtime[l])
ts->short_io_u[k] += td->ts.short_io_u[k];
}
+ for (k = 0; k <= DDIR_WRITE; k++) {
+ int m;
+ for (m = 0; m < FIO_IO_U_PLAT_NR; m++)
+ ts->io_u_plat[k][m] += td->ts.io_u_plat[k][m];
+ }
+
ts->total_run_time += td->ts.total_run_time;
ts->total_submit += td->ts.total_submit;
ts->total_complete += td->ts.total_complete;
ts = &threadstats[i];
rs = &runstats[ts->groupid];
- if (terse_output)
+ if (is_backend)
+ fio_server_send_ts(ts, rs);
+ else if (terse_output)
show_thread_status_terse(ts, rs);
else
show_thread_status(ts, rs);
}
if (!terse_output) {
- for (i = 0; i < groupid + 1; i++)
- show_group_stats(&runstats[i], i);
+ for (i = 0; i < groupid + 1; i++) {
+ rs = &runstats[i];
+
+ rs->groupid = i;
+ if (is_backend)
+ fio_server_send_gs(rs);
+ else
+ show_group_stats(rs);
+ }
show_disk_util();
}
static void __add_log_sample(struct io_log *iolog, unsigned long val,
enum fio_ddir ddir, unsigned int bs,
- unsigned long time)
+ unsigned long t)
{
const int nr_samples = iolog->nr_samples;
}
iolog->log[nr_samples].val = val;
- iolog->log[nr_samples].time = time;
+ iolog->log[nr_samples].time = t;
iolog->log[nr_samples].ddir = ddir;
iolog->log[nr_samples].bs = bs;
iolog->nr_samples++;
__add_log_sample(iolog, val, ddir, bs, mtime_since_genesis());
}
+static void add_clat_percentile_sample(struct thread_stat *ts,
+ unsigned long usec, enum fio_ddir ddir)
+{
+ unsigned int idx = plat_val_to_idx(usec);
+ assert(idx < FIO_IO_U_PLAT_NR);
+
+ ts->io_u_plat[ddir][idx]++;
+}
+
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long usec, unsigned int bs)
{
add_stat_sample(&ts->clat_stat[ddir], usec);
- if (ts->clat_log)
- add_log_sample(td, ts->clat_log, usec, ddir, bs);
+ if (td->clat_log)
+ add_log_sample(td, td->clat_log, usec, ddir, bs);
+
+ if (ts->clat_percentiles)
+ add_clat_percentile_sample(ts, usec, ddir);
}
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
add_stat_sample(&ts->slat_stat[ddir], usec);
- if (ts->slat_log)
- add_log_sample(td, ts->slat_log, usec, ddir, bs);
+ if (td->slat_log)
+ add_log_sample(td, td->slat_log, usec, ddir, bs);
}
void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
add_stat_sample(&ts->lat_stat[ddir], usec);
- if (ts->lat_log)
- add_log_sample(td, ts->lat_log, usec, ddir, bs);
+ if (td->lat_log)
+ add_log_sample(td, td->lat_log, usec, ddir, bs);
}
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs,
if (!ddir_rw(ddir))
return;
- spent = mtime_since(&ts->stat_sample_time[ddir], t);
+ spent = mtime_since(&td->stat_sample_time[ddir], t);
if (spent < td->o.bw_avg_time)
return;
- rate = (td->this_io_bytes[ddir] - ts->stat_io_bytes[ddir]) *
+ rate = (td->this_io_bytes[ddir] - td->stat_io_bytes[ddir]) *
1000 / spent / 1024;
add_stat_sample(&ts->bw_stat[ddir], rate);
- if (ts->bw_log)
- add_log_sample(td, ts->bw_log, rate, ddir, bs);
+ if (td->bw_log)
+ add_log_sample(td, td->bw_log, rate, ddir, bs);
- fio_gettime(&ts->stat_sample_time[ddir], NULL);
- ts->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
+ fio_gettime(&td->stat_sample_time[ddir], NULL);
+ td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}