#include "zbd.h"
#include "oslib/asprintf.h"
+#ifdef WIN32
+#define LOG_MSEC_SLACK 2
+#else
#define LOG_MSEC_SLACK 1
+#endif
struct fio_sem *stat_sem;
len = calc_clat_percentiles(io_u_plat, nr, plist, &ovals, &maxv, &minv);
if (!len || !ovals)
- goto out;
+ return;
/*
* We default to nsecs, but if the value range is such that we
log_buf(out, "\n");
}
-out:
free(ovals);
}
{
char *io, *agg, *min, *max;
char *ioalt, *aggalt, *minalt, *maxalt;
- uint64_t io_mix = 0, agg_mix = 0, min_mix = -1, max_mix = 0, min_run = -1, max_run = 0;
- int i;
+ uint64_t io_mix = 0, agg_mix = 0, min_mix = -1, max_mix = 0;
+ uint64_t min_run = -1, max_run = 0;
const int i2p = is_power_of_2(rs->kb_base);
+ int i;
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
if (!rs->max_run[i])
free(minalt);
free(maxalt);
}
-
+
	/* Need to aggregate statistics to show mixed values */
- if (rs->unified_rw_rep == UNIFIED_BOTH)
+ if (rs->unified_rw_rep == UNIFIED_BOTH)
show_mixed_group_stats(rs, out);
}
return p_of_agg;
}
-static void show_mixed_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
- struct buf_output *out)
+static void show_mixed_ddir_status(struct group_run_stats *rs,
+ struct thread_stat *ts,
+ struct buf_output *out)
{
unsigned long runt;
unsigned long long min, max, bw, iops;
double mean, dev;
char *io_p, *bw_p, *bw_p_alt, *iops_p, *post_st = NULL;
struct thread_stat *ts_lcl;
-
int i2p;
- int ddir = 0, i;
+ int ddir = 0;
- /* Handle aggregation of Reads (ddir = 0), Writes (ddir = 1), and Trims (ddir = 2) */
+ /*
+ * Handle aggregation of Reads (ddir = 0), Writes (ddir = 1), and
+	 * Trims (ddir = 2)
+	 */
ts_lcl = malloc(sizeof(struct thread_stat));
memset((void *)ts_lcl, 0, sizeof(struct thread_stat));
- ts_lcl->unified_rw_rep = UNIFIED_MIXED; /* calculate mixed stats */
- for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- ts_lcl->clat_stat[i].min_val = ULONG_MAX;
- ts_lcl->slat_stat[i].min_val = ULONG_MAX;
- ts_lcl->lat_stat[i].min_val = ULONG_MAX;
- ts_lcl->bw_stat[i].min_val = ULONG_MAX;
- ts_lcl->iops_stat[i].min_val = ULONG_MAX;
- ts_lcl->clat_high_prio_stat[i].min_val = ULONG_MAX;
- ts_lcl->clat_low_prio_stat[i].min_val = ULONG_MAX;
- }
- ts_lcl->sync_stat.min_val = ULONG_MAX;
+ /* calculate mixed stats */
+ ts_lcl->unified_rw_rep = UNIFIED_MIXED;
+ init_thread_stat_min_vals(ts_lcl);
sum_thread_stats(ts_lcl, ts, 1);
assert(ddir_rw(ddir));
- if (!ts_lcl->runtime[ddir])
+ if (!ts_lcl->runtime[ddir]) {
+ free(ts_lcl);
return;
+ }
i2p = is_power_of_2(rs->kb_base);
runt = ts_lcl->runtime[ddir];
else
samples = ts_lcl->clat_stat[ddir].samples;
- /* Only print this if some high and low priority stats were collected */
+ /* Only print if high and low priority stats were collected */
if (ts_lcl->clat_high_prio_stat[ddir].samples > 0 &&
- ts_lcl->clat_low_prio_stat[ddir].samples > 0)
- {
+ ts_lcl->clat_low_prio_stat[ddir].samples > 0) {
sprintf(prio_name, "high prio (%.2f%%) %s",
100. * (double) ts_lcl->clat_high_prio_stat[ddir].samples / (double) samples,
name);
if (!is_running_backend())
return;
- if (flist_empty(&disk_list)) {
+ if (flist_empty(&disk_list))
return;
- }
if ((output_format & FIO_OUTPUT_JSON) && parent)
do_json = true;
if (!terse && !do_json)
log_buf(out, "\nDisk stats (read/write):\n");
- if (do_json)
+ if (do_json) {
json_object_add_disk_utils(parent, &disk_list);
- else if (output_format & ~(FIO_OUTPUT_JSON | FIO_OUTPUT_JSON_PLUS)) {
+ } else if (output_format & ~(FIO_OUTPUT_JSON | FIO_OUTPUT_JSON_PLUS)) {
flist_for_each(entry, &disk_list) {
du = flist_entry(entry, struct disk_util, list);
else
log_buf(out, ";%llu;%llu;%f;%f", 0ULL, 0ULL, 0.0, 0.0);
- if (ts->lat_percentiles)
+ if (ts->lat_percentiles) {
len = calc_clat_percentiles(ts->io_u_plat[FIO_LAT][ddir],
ts->lat_stat[ddir].samples,
ts->percentile_list, &ovals, &maxv,
&minv);
- else if (ts->clat_percentiles)
+ } else if (ts->clat_percentiles) {
len = calc_clat_percentiles(ts->io_u_plat[FIO_CLAT][ddir],
ts->clat_stat[ddir].samples,
ts->percentile_list, &ovals, &maxv,
&minv);
- else
+ } else {
len = 0;
-
+ }
+
for (i = 0; i < FIO_IO_U_LIST_MAX_LEN; i++) {
if (i >= len) {
log_buf(out, ";0%%=0");
}
log_buf(out, ";%llu;%llu;%f%%;%f;%f", min, max, p_of_agg, mean, dev);
- } else
+ } else {
log_buf(out, ";%llu;%llu;%f%%;%f;%f", 0ULL, 0ULL, 0.0, 0.0, 0.0);
+ }
if (ver == 5) {
if (bw_stat)
int ver, struct buf_output *out)
{
struct thread_stat *ts_lcl;
- int i;
- /* Handle aggregation of Reads (ddir = 0), Writes (ddir = 1), and Trims (ddir = 2) */
+ /*
+ * Handle aggregation of Reads (ddir = 0), Writes (ddir = 1), and
+ * Trims (ddir = 2)
+ */
ts_lcl = malloc(sizeof(struct thread_stat));
memset((void *)ts_lcl, 0, sizeof(struct thread_stat));
- ts_lcl->unified_rw_rep = UNIFIED_MIXED; /* calculate mixed stats */
- for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- ts_lcl->clat_stat[i].min_val = ULONG_MAX;
- ts_lcl->slat_stat[i].min_val = ULONG_MAX;
- ts_lcl->lat_stat[i].min_val = ULONG_MAX;
- ts_lcl->bw_stat[i].min_val = ULONG_MAX;
- ts_lcl->iops_stat[i].min_val = ULONG_MAX;
- ts_lcl->clat_high_prio_stat[i].min_val = ULONG_MAX;
- ts_lcl->clat_low_prio_stat[i].min_val = ULONG_MAX;
- }
- ts_lcl->sync_stat.min_val = ULONG_MAX;
+ /* calculate mixed stats */
+ ts_lcl->unified_rw_rep = UNIFIED_MIXED;
+ init_thread_stat_min_vals(ts_lcl);
ts_lcl->lat_percentiles = ts->lat_percentiles;
ts_lcl->clat_percentiles = ts->clat_percentiles;
ts_lcl->slat_percentiles = ts->slat_percentiles;
- ts_lcl->percentile_precision = ts->percentile_precision;
+ ts_lcl->percentile_precision = ts->percentile_precision;
memcpy(ts_lcl->percentile_list, ts->percentile_list, sizeof(ts->percentile_list));
sum_thread_stats(ts_lcl, ts, 1);
free(ts_lcl);
}
-static struct json_object *add_ddir_lat_json(struct thread_stat *ts, uint32_t percentiles,
- struct io_stat *lat_stat, uint64_t *io_u_plat)
+static struct json_object *add_ddir_lat_json(struct thread_stat *ts,
+ uint32_t percentiles,
+ struct io_stat *lat_stat,
+ uint64_t *io_u_plat)
{
char buf[120];
double mean, dev;
struct group_run_stats *rs, struct json_object *parent)
{
struct thread_stat *ts_lcl;
- int i;
- /* Handle aggregation of Reads (ddir = 0), Writes (ddir = 1), and Trims (ddir = 2) */
+ /*
+ * Handle aggregation of Reads (ddir = 0), Writes (ddir = 1), and
+ * Trims (ddir = 2)
+ */
ts_lcl = malloc(sizeof(struct thread_stat));
memset((void *)ts_lcl, 0, sizeof(struct thread_stat));
- ts_lcl->unified_rw_rep = UNIFIED_MIXED; /* calculate mixed stats */
- for (i = 0; i < DDIR_RWDIR_CNT; i++) {
- ts_lcl->clat_stat[i].min_val = ULONG_MAX;
- ts_lcl->slat_stat[i].min_val = ULONG_MAX;
- ts_lcl->lat_stat[i].min_val = ULONG_MAX;
- ts_lcl->bw_stat[i].min_val = ULONG_MAX;
- ts_lcl->iops_stat[i].min_val = ULONG_MAX;
- ts_lcl->clat_high_prio_stat[i].min_val = ULONG_MAX;
- ts_lcl->clat_low_prio_stat[i].min_val = ULONG_MAX;
- }
- ts_lcl->sync_stat.min_val = ULONG_MAX;
+ /* calculate mixed stats */
+ ts_lcl->unified_rw_rep = UNIFIED_MIXED;
+ init_thread_stat_min_vals(ts_lcl);
ts_lcl->lat_percentiles = ts->lat_percentiles;
ts_lcl->clat_percentiles = ts->clat_percentiles;
ts_lcl->slat_percentiles = ts->slat_percentiles;
- ts_lcl->percentile_precision = ts->percentile_precision;
+ ts_lcl->percentile_precision = ts->percentile_precision;
memcpy(ts_lcl->percentile_list, ts->percentile_list, sizeof(ts->percentile_list));
sum_thread_stats(ts_lcl, ts, 1);
{
int k, l, m;
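+	/* sync_stat is a single stat, not indexed by ddir; sum it once up front */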
+ sum_stat(&dst->sync_stat, &src->sync_stat, first, false);
+
for (l = 0; l < DDIR_RWDIR_CNT; l++) {
- if (!(dst->unified_rw_rep == UNIFIED_MIXED)) {
+ if (dst->unified_rw_rep != UNIFIED_MIXED) {
sum_stat(&dst->clat_stat[l], &src->clat_stat[l], first, false);
sum_stat(&dst->clat_high_prio_stat[l], &src->clat_high_prio_stat[l], first, false);
sum_stat(&dst->clat_low_prio_stat[l], &src->clat_low_prio_stat[l], first, false);
}
}
- sum_stat(&dst->sync_stat, &src->sync_stat, first, false);
dst->usr_time += src->usr_time;
dst->sys_time += src->sys_time;
dst->ctx += src->ctx;
dst->io_u_lat_m[k] += src->io_u_lat_m[k];
for (k = 0; k < DDIR_RWDIR_CNT; k++) {
- if (!(dst->unified_rw_rep == UNIFIED_MIXED)) {
+ if (dst->unified_rw_rep != UNIFIED_MIXED) {
dst->total_io_u[k] += src->total_io_u[k];
dst->short_io_u[k] += src->short_io_u[k];
dst->drop_io_u[k] += src->drop_io_u[k];
for (k = 0; k < FIO_LAT_CNT; k++)
for (l = 0; l < DDIR_RWDIR_CNT; l++)
for (m = 0; m < FIO_IO_U_PLAT_NR; m++)
- if (!(dst->unified_rw_rep == UNIFIED_MIXED))
+ if (dst->unified_rw_rep != UNIFIED_MIXED)
dst->io_u_plat[k][l][m] += src->io_u_plat[k][l][m];
else
dst->io_u_plat[k][0][m] += src->io_u_plat[k][l][m];
for (k = 0; k < DDIR_RWDIR_CNT; k++) {
for (m = 0; m < FIO_IO_U_PLAT_NR; m++) {
- if (!(dst->unified_rw_rep == UNIFIED_MIXED)) {
+ if (dst->unified_rw_rep != UNIFIED_MIXED) {
dst->io_u_plat_high_prio[k][m] += src->io_u_plat_high_prio[k][m];
dst->io_u_plat_low_prio[k][m] += src->io_u_plat_low_prio[k][m];
} else {
gs->min_bw[i] = gs->min_run[i] = ~0UL;
}
-void init_thread_stat(struct thread_stat *ts)
+void init_thread_stat_min_vals(struct thread_stat *ts)
{
- int j;
+ int i;
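+
+	/* seed each min_val high so the first recorded sample becomes the minimum */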
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ ts->clat_stat[i].min_val = ULONG_MAX;
+ ts->slat_stat[i].min_val = ULONG_MAX;
+ ts->lat_stat[i].min_val = ULONG_MAX;
+ ts->bw_stat[i].min_val = ULONG_MAX;
+ ts->iops_stat[i].min_val = ULONG_MAX;
+ ts->clat_high_prio_stat[i].min_val = ULONG_MAX;
+ ts->clat_low_prio_stat[i].min_val = ULONG_MAX;
+ }
+ ts->sync_stat.min_val = ULONG_MAX;
+}
+
+void init_thread_stat(struct thread_stat *ts)
+{
memset(ts, 0, sizeof(*ts));
- for (j = 0; j < DDIR_RWDIR_CNT; j++) {
- ts->lat_stat[j].min_val = -1UL;
- ts->clat_stat[j].min_val = -1UL;
- ts->slat_stat[j].min_val = -1UL;
- ts->bw_stat[j].min_val = -1UL;
- ts->iops_stat[j].min_val = -1UL;
- ts->clat_high_prio_stat[j].min_val = -1UL;
- ts->clat_low_prio_stat[j].min_val = -1UL;
- }
- ts->sync_stat.min_val = -1UL;
+ init_thread_stat_min_vals(ts);
ts->groupid = -1;
}
*/
static struct io_logs *get_new_log(struct io_log *iolog)
{
- size_t new_size, new_samples;
+ size_t new_samples;
struct io_logs *cur_log;
/*
* Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling
* forever
*/
- if (!iolog->cur_log_max)
- new_samples = DEF_LOG_ENTRIES;
- else {
+ if (!iolog->cur_log_max) {
+		/* aggregate logs have no associated td; fall back to the default size */
+		new_samples = iolog->td ? iolog->td->o.log_entries : DEF_LOG_ENTRIES;
+ } else {
new_samples = iolog->cur_log_max * 2;
if (new_samples > MAX_LOG_ENTRIES)
new_samples = MAX_LOG_ENTRIES;
}
- new_size = new_samples * log_entry_sz(iolog);
-
cur_log = smalloc(sizeof(*cur_log));
if (cur_log) {
INIT_FLIST_HEAD(&cur_log->list);
- cur_log->log = malloc(new_size);
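+		/* use calloc so the new sample slots start out zeroed */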
+ cur_log->log = calloc(new_samples, log_entry_sz(iolog));
if (cur_log->log) {
cur_log->nr_samples = 0;
cur_log->max_samples = new_samples;
static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
enum fio_ddir ddir, unsigned long long bs,
- unsigned long t, uint64_t offset, uint8_t priority_bit)
+ unsigned long t, uint64_t offset,
+ unsigned int priority)
{
struct io_logs *cur_log;
s->time = t + (iolog->td ? iolog->td->unix_epoch : 0);
io_sample_set_ddir(iolog, s, ddir);
s->bs = bs;
- s->priority_bit = priority_bit;
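+	/* store the full ioprio value rather than a single high/low bit */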
+ s->priority = priority;
if (iolog->log_offset) {
struct io_sample_offset *so = (void *) s;
}
static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
- unsigned long elapsed, bool log_max, uint8_t priority_bit)
+ unsigned long elapsed, bool log_max)
{
/*
* Note an entry in the log. Use the mean from the logged samples,
else
data.val = iolog->avg_window[ddir].mean.u.f + 0.50;
- __add_log_sample(iolog, data, ddir, 0, elapsed, 0, priority_bit);
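+	/* a windowed average has no single originating I/O; log priority 0 */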
+ __add_log_sample(iolog, data, ddir, 0, elapsed, 0, 0);
}
reset_io_stat(&iolog->avg_window[ddir]);
}
static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
- bool log_max, uint8_t priority_bit)
+ bool log_max)
{
int ddir;
for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
- __add_stat_to_log(iolog, ddir, elapsed, log_max, priority_bit);
+ __add_stat_to_log(iolog, ddir, elapsed, log_max);
}
static unsigned long add_log_sample(struct thread_data *td,
struct io_log *iolog,
union io_sample_data data,
enum fio_ddir ddir, unsigned long long bs,
- uint64_t offset, uint8_t priority_bit)
+ uint64_t offset, unsigned int ioprio)
{
unsigned long elapsed, this_window;
* If no time averaging, just add the log sample.
*/
if (!iolog->avg_msec) {
- __add_log_sample(iolog, data, ddir, bs, elapsed, offset, priority_bit);
+ __add_log_sample(iolog, data, ddir, bs, elapsed, offset,
+ ioprio);
return 0;
}
return diff;
}
- __add_stat_to_log(iolog, ddir, elapsed, td->o.log_max != 0, priority_bit);
+ __add_stat_to_log(iolog, ddir, elapsed, td->o.log_max != 0);
iolog->avg_last[ddir] = elapsed - (elapsed % iolog->avg_msec);
elapsed = mtime_since_now(&td->epoch);
if (td->clat_log && unit_logs)
- _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0);
if (td->slat_log && unit_logs)
- _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0);
if (td->lat_log && unit_logs)
- _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0);
if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
- _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0);
if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
- _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
}
-void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned long long bs,
- uint8_t priority_bit)
+void add_agg_sample(union io_sample_data data, enum fio_ddir ddir,
+ unsigned long long bs)
{
struct io_log *iolog;
return;
iolog = agg_io_log[ddir];
- __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0, priority_bit);
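+	/* aggregate samples are not tied to one I/O, so log priority 0 */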
+ __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0, 0);
}
void add_sync_clat_sample(struct thread_stat *ts, unsigned long long nsec)
add_stat_sample(&ts->sync_stat, nsec);
}
-static void add_lat_percentile_sample_noprio(struct thread_stat *ts,
- unsigned long long nsec, enum fio_ddir ddir, enum fio_lat lat)
+static inline void add_lat_percentile_sample(struct thread_stat *ts,
+ unsigned long long nsec,
+ enum fio_ddir ddir,
+ enum fio_lat lat)
{
unsigned int idx = plat_val_to_idx(nsec);
assert(idx < FIO_IO_U_PLAT_NR);
ts->io_u_plat[lat][ddir][idx]++;
}
-static void add_lat_percentile_sample(struct thread_stat *ts,
- unsigned long long nsec, enum fio_ddir ddir, uint8_t priority_bit,
- enum fio_lat lat)
+static inline void add_lat_percentile_prio_sample(struct thread_stat *ts,
+ unsigned long long nsec,
+ enum fio_ddir ddir,
+ bool high_prio)
{
unsigned int idx = plat_val_to_idx(nsec);
-	add_lat_percentile_sample_noprio(ts, nsec, ddir, lat);

-	if (!priority_bit)
+	if (!high_prio)
ts->io_u_plat_low_prio[ddir][idx]++;
else
ts->io_u_plat_high_prio[ddir][idx]++;
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long long nsec, unsigned long long bs,
- uint64_t offset, uint8_t priority_bit)
+ uint64_t offset, unsigned int ioprio, bool high_prio)
{
const bool needs_lock = td_async_processing(td);
unsigned long elapsed, this_window;
add_stat_sample(&ts->clat_stat[ddir], nsec);
+ /*
+ * When lat_percentiles=1 (default 0), the reported high/low priority
+ * percentiles and stats are used for describing total latency values,
+ * even though the variable names themselves start with clat_.
+ *
+ * Because of the above definition, add a prio stat sample only when
+ * lat_percentiles=0. add_lat_sample() will add the prio stat sample
+ * when lat_percentiles=1.
+ */
if (!ts->lat_percentiles) {
- if (priority_bit)
+ if (high_prio)
add_stat_sample(&ts->clat_high_prio_stat[ddir], nsec);
else
add_stat_sample(&ts->clat_low_prio_stat[ddir], nsec);
if (td->clat_log)
add_log_sample(td, td->clat_log, sample_val(nsec), ddir, bs,
- offset, priority_bit);
+ offset, ioprio);
if (ts->clat_percentiles) {
- if (ts->lat_percentiles)
- add_lat_percentile_sample_noprio(ts, nsec, ddir, FIO_CLAT);
- else
- add_lat_percentile_sample(ts, nsec, ddir, priority_bit, FIO_CLAT);
+ /*
+ * Because of the above definition, add a prio lat percentile
+ * sample only when lat_percentiles=0. add_lat_sample() will add
+ * the prio lat percentile sample when lat_percentiles=1.
+ */
+ add_lat_percentile_sample(ts, nsec, ddir, FIO_CLAT);
+ if (!ts->lat_percentiles)
+ add_lat_percentile_prio_sample(ts, nsec, ddir,
+ high_prio);
}
if (iolog && iolog->hist_msec) {
FIO_IO_U_PLAT_NR * sizeof(uint64_t));
flist_add(&dst->list, &hw->list);
__add_log_sample(iolog, sample_plat(dst), ddir, bs,
- elapsed, offset, priority_bit);
+ elapsed, offset, ioprio);
/*
* Update the last time we recorded as being now, minus
}
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
- unsigned long long nsec, unsigned long long bs, uint64_t offset,
- uint8_t priority_bit)
+ unsigned long long nsec, unsigned long long bs,
+ uint64_t offset, unsigned int ioprio)
{
const bool needs_lock = td_async_processing(td);
struct thread_stat *ts = &td->ts;
add_stat_sample(&ts->slat_stat[ddir], nsec);
if (td->slat_log)
- add_log_sample(td, td->slat_log, sample_val(nsec), ddir, bs, offset,
- priority_bit);
+ add_log_sample(td, td->slat_log, sample_val(nsec), ddir, bs,
+ offset, ioprio);
if (ts->slat_percentiles)
- add_lat_percentile_sample_noprio(ts, nsec, ddir, FIO_SLAT);
+ add_lat_percentile_sample(ts, nsec, ddir, FIO_SLAT);
if (needs_lock)
__td_io_u_unlock(td);
void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long long nsec, unsigned long long bs,
- uint64_t offset, uint8_t priority_bit)
+ uint64_t offset, unsigned int ioprio, bool high_prio)
{
const bool needs_lock = td_async_processing(td);
struct thread_stat *ts = &td->ts;
if (td->lat_log)
add_log_sample(td, td->lat_log, sample_val(nsec), ddir, bs,
- offset, priority_bit);
+ offset, ioprio);
+ /*
+ * When lat_percentiles=1 (default 0), the reported high/low priority
+ * percentiles and stats are used for describing total latency values,
+ * even though the variable names themselves start with clat_.
+ *
+ * Because of the above definition, add a prio stat and prio lat
+ * percentile sample only when lat_percentiles=1. add_clat_sample() will
+ * add the prio stat and prio lat percentile sample when
+ * lat_percentiles=0.
+ */
if (ts->lat_percentiles) {
- add_lat_percentile_sample(ts, nsec, ddir, priority_bit, FIO_LAT);
- if (priority_bit)
+ add_lat_percentile_sample(ts, nsec, ddir, FIO_LAT);
+ add_lat_percentile_prio_sample(ts, nsec, ddir, high_prio);
+ if (high_prio)
add_stat_sample(&ts->clat_high_prio_stat[ddir], nsec);
else
add_stat_sample(&ts->clat_low_prio_stat[ddir], nsec);
if (td->bw_log)
add_log_sample(td, td->bw_log, sample_val(rate), io_u->ddir,
- bytes, io_u->offset, io_u_is_prio(io_u));
+ bytes, io_u->offset, io_u->ioprio);
td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir];
if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
bs = td->o.min_bs[ddir];
- next = add_log_sample(td, log, sample_val(rate), ddir, bs, 0, 0);
+ next = add_log_sample(td, log, sample_val(rate), ddir,
+ bs, 0, 0);
next_log = min(next_log, next);
}
if (td->iops_log)
add_log_sample(td, td->iops_log, sample_val(1), io_u->ddir,
- bytes, io_u->offset, io_u_is_prio(io_u));
+ bytes, io_u->offset, io_u->ioprio);
td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];