o->log_hist_coarseness = le32_to_cpu(top->log_hist_coarseness);
o->log_max = le32_to_cpu(top->log_max);
o->log_offset = le32_to_cpu(top->log_offset);
+ o->log_prio = le32_to_cpu(top->log_prio);
o->log_gz = le32_to_cpu(top->log_gz);
o->log_gz_store = le32_to_cpu(top->log_gz_store);
o->log_unix_epoch = le32_to_cpu(top->log_unix_epoch);
top->log_avg_msec = cpu_to_le32(o->log_avg_msec);
top->log_max = cpu_to_le32(o->log_max);
top->log_offset = cpu_to_le32(o->log_offset);
+ top->log_prio = cpu_to_le32(o->log_prio);
top->log_gz = cpu_to_le32(o->log_gz);
top->log_gz_store = cpu_to_le32(o->log_gz_store);
top->log_unix_epoch = cpu_to_le32(o->log_unix_epoch);
ret->log_type = le32_to_cpu(ret->log_type);
ret->compressed = le32_to_cpu(ret->compressed);
ret->log_offset = le32_to_cpu(ret->log_offset);
+ ret->log_prio = le32_to_cpu(ret->log_prio);
ret->log_hist_coarseness = le32_to_cpu(ret->log_hist_coarseness);
if (*store_direct)
s->data.val = le64_to_cpu(s->data.val);
s->__ddir = __le32_to_cpu(s->__ddir);
s->bs = le64_to_cpu(s->bs);
+ s->priority = le16_to_cpu(s->priority);
if (ret->log_offset) {
struct io_sample_offset *so = (void *) s;
uint64_t nsec;
nsec = ntime_since_now(&start);
- add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0);
+ add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, false);
}
return 0;
uint64_t nsec;
nsec = ntime_since_now(&start);
- add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0);
+ add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, false);
}
return 0;
uint64_t nsec;
nsec = ntime_since_now(&start);
- add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0);
+ add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, false);
}
return 0;
* than the priority set by "prio" and "prioclass"
* options.
*/
- io_u->flags |= IO_U_F_PRIORITY;
+ io_u->flags |= IO_U_F_HIGH_PRIO;
}
} else {
sqe->ioprio = td->ioprio;
* is higher (has a lower value) than the async IO
* priority.
*/
- io_u->flags |= IO_U_F_PRIORITY;
+ io_u->flags |= IO_U_F_HIGH_PRIO;
}
}
+
+ io_u->ioprio = sqe->ioprio;
}
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
ioprio_value(cmdprio->class[ddir], cmdprio->level[ddir]);
if (p && rand_between(&td->prio_state, 0, 99) < p) {
+ io_u->ioprio = cmdprio_value;
io_u->iocb.aio_reqprio = cmdprio_value;
io_u->iocb.u.c.flags |= IOCB_FLAG_IOPRIO;
if (!td->ioprio || cmdprio_value < td->ioprio) {
* The async IO priority is higher (has a lower value)
* than the default context priority.
*/
- io_u->flags |= IO_U_F_PRIORITY;
+ io_u->flags |= IO_U_F_HIGH_PRIO;
}
} else if (td->ioprio && td->ioprio < cmdprio_value) {
/*
* and this priority is higher (has a lower value) than the
* async IO priority.
*/
- io_u->flags |= IO_U_F_PRIORITY;
+ io_u->flags |= IO_U_F_HIGH_PRIO;
}
}
memcpy(&rate_prev_time, &now, sizeof(now));
regrow_agg_logs();
for_each_rw_ddir(ddir) {
- add_agg_sample(sample_val(je->rate[ddir]), ddir, 0, 0);
+ add_agg_sample(sample_val(je->rate[ddir]), ddir, 0);
}
}
entry as well as the other data values. Defaults to 0 meaning that
offsets are not present in logs. Also see \fBLOG FILE FORMATS\fR section.
.TP
+.BI log_prio \fR=\fPbool
+If this is set, the iolog options will include the I/O priority for the I/O
+entry as well as the other data values. Defaults to 0 meaning that
+I/O priorities are not present in logs. Also see \fBLOG FILE FORMATS\fR section.
+.TP
.BI log_compression \fR=\fPint
If this is set, fio will compress the I/O logs as it goes, to keep the
memory footprint lower. When a log reaches the specified size, that chunk is
from the start of the file for that particular I/O. The logging of the offset can be
toggled with \fBlog_offset\fR.
.P
-`Command priority` is 0 for normal priority and 1 for high priority. This is controlled
-by the ioengine specific \fBcmdprio_percentage\fR.
+If \fBlog_prio\fR is not set, the entry's `Command priority` is 1 for an IO executed
+with the highest RT priority class (\fBprioclass\fR=1 or \fBcmdprio_class\fR=1) and 0
+otherwise. This is controlled by the \fBprioclass\fR option and the ioengine specific
+\fBcmdprio_percentage\fR and \fBcmdprio_class\fR options. If \fBlog_prio\fR is set, the
+entry's `Command priority` is the priority set for the IO, as a 16-bit hexadecimal
+number with the lowest 13 bits indicating the priority value (\fBprio\fR and
+\fBcmdprio\fR options) and the highest 3 bits indicating the IO priority class
+(\fBprioclass\fR and \fBcmdprio_class\fR options).
.P
Fio defaults to logging every individual I/O but when windowed logging is set
through \fBlog_avg_msec\fR, either the average (by default) or the maximum
.hist_coarseness = o->log_hist_coarseness,
.log_type = IO_LOG_TYPE_LAT,
.log_offset = o->log_offset,
+ .log_prio = o->log_prio,
.log_gz = o->log_gz,
.log_gz_store = o->log_gz_store,
};
.hist_coarseness = o->log_hist_coarseness,
.log_type = IO_LOG_TYPE_HIST,
.log_offset = o->log_offset,
+ .log_prio = o->log_prio,
.log_gz = o->log_gz,
.log_gz_store = o->log_gz_store,
};
.hist_coarseness = o->log_hist_coarseness,
.log_type = IO_LOG_TYPE_BW,
.log_offset = o->log_offset,
+ .log_prio = o->log_prio,
.log_gz = o->log_gz,
.log_gz_store = o->log_gz_store,
};
.hist_coarseness = o->log_hist_coarseness,
.log_type = IO_LOG_TYPE_IOPS,
.log_offset = o->log_offset,
+ .log_prio = o->log_prio,
.log_gz = o->log_gz,
.log_gz_store = o->log_gz_store,
};
assert(io_u->flags & IO_U_F_FREE);
io_u_clear(td, io_u, IO_U_F_FREE | IO_U_F_NO_FILE_PUT |
IO_U_F_TRIMMED | IO_U_F_BARRIER |
- IO_U_F_VER_LIST | IO_U_F_PRIORITY);
+ IO_U_F_VER_LIST | IO_U_F_HIGH_PRIO);
io_u->error = 0;
io_u->acct_ddir = -1;
io_u->xfer_buf = io_u->buf;
io_u->xfer_buflen = io_u->buflen;
+ /*
+ * Remember the issuing context priority. The IO engine may change this.
+ */
+ io_u->ioprio = td->ioprio;
out:
assert(io_u->file);
if (!td_io_prep(td, io_u)) {
unsigned long long tnsec;
tnsec = ntime_since(&io_u->start_time, &icd->time);
- add_lat_sample(td, idx, tnsec, bytes, io_u->offset, io_u_is_prio(io_u));
+ add_lat_sample(td, idx, tnsec, bytes, io_u->offset,
+ io_u->ioprio, io_u_is_high_prio(io_u));
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
if (ddir_rw(idx)) {
if (!td->o.disable_clat) {
- add_clat_sample(td, idx, llnsec, bytes, io_u->offset, io_u_is_prio(io_u));
+ add_clat_sample(td, idx, llnsec, bytes, io_u->offset,
+ io_u->ioprio, io_u_is_high_prio(io_u));
io_u_mark_latency(td, llnsec);
}
td = td->parent;
add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
- io_u->offset, io_u_is_prio(io_u));
+ io_u->offset, io_u->ioprio);
}
}
IO_U_F_TRIMMED = 1 << 5,
IO_U_F_BARRIER = 1 << 6,
IO_U_F_VER_LIST = 1 << 7,
- IO_U_F_PRIORITY = 1 << 8,
+ IO_U_F_HIGH_PRIO = 1 << 8,
};
/*
*/
unsigned short numberio;
+ /*
+ * IO priority.
+ */
+ unsigned short ioprio;
+
/*
* Allocated/set buffer and length
*/
td_flags_clear((td), &(io_u->flags), (val))
#define io_u_set(td, io_u, val) \
td_flags_set((td), &(io_u)->flags, (val))
-#define io_u_is_prio(io_u) \
- (io_u->flags & (unsigned int) IO_U_F_PRIORITY) != 0
+#define io_u_is_high_prio(io_u) (io_u->flags & IO_U_F_HIGH_PRIO)
#endif
INIT_FLIST_HEAD(&l->io_logs);
l->log_type = p->log_type;
l->log_offset = p->log_offset;
+ l->log_prio = p->log_prio;
l->log_gz = p->log_gz;
l->log_gz_store = p->log_gz_store;
l->avg_msec = p->avg_msec;
if (l->log_offset)
l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;
+ if (l->log_prio)
+ l->log_ddir_mask |= LOG_PRIO_SAMPLE_BIT;
INIT_FLIST_HEAD(&l->chunk_list);
void flush_samples(FILE *f, void *samples, uint64_t sample_size)
{
	struct io_sample *s;
-	int log_offset;
+	int log_offset, log_prio;
	uint64_t i, nr_samples;
+	unsigned int prio_val;
+	const char *fmt;
	if (!sample_size)
		return;
	s = __get_sample(samples, 0, 0);
	log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
+	log_prio = (s->__ddir & LOG_PRIO_SAMPLE_BIT) != 0;
+
+	/*
+	 * Select the output line format once, outside the loop: the offset
+	 * column is present only when offset logging was enabled for this
+	 * log, and the trailing priority column is printed as a 0x%04x
+	 * ioprio value when priority logging was enabled, or as a plain
+	 * 0/1 value otherwise.
+	 */
+	if (log_offset) {
+		if (log_prio)
+			fmt = "%lu, %" PRId64 ", %u, %llu, %llu, 0x%04x\n";
+		else
+			fmt = "%lu, %" PRId64 ", %u, %llu, %llu, %u\n";
+	} else {
+		if (log_prio)
+			fmt = "%lu, %" PRId64 ", %u, %llu, 0x%04x\n";
+		else
+			fmt = "%lu, %" PRId64 ", %u, %llu, %u\n";
+	}
	nr_samples = sample_size / __log_entry_sz(log_offset);
	for (i = 0; i < nr_samples; i++) {
		s = __get_sample(samples, log_offset, i);
+		/*
+		 * Without log_prio, keep the legacy single-flag semantics:
+		 * print 1 when the sample's priority class is RT, else 0.
+		 */
+		if (log_prio)
+			prio_val = s->priority;
+		else
+			prio_val = ioprio_value_is_class_rt(s->priority);
+
		if (!log_offset) {
-			fprintf(f, "%lu, %" PRId64 ", %u, %llu, %u\n",
				(unsigned long) s->time,
				s->data.val,
				io_sample_ddir(s), (unsigned long long) s->bs, s->priority_bit);
+			fprintf(f, fmt,
				(unsigned long) s->time,
				s->data.val,
				io_sample_ddir(s), (unsigned long long) s->bs,
				prio_val);
		} else {
			struct io_sample_offset *so = (void *) s;
-			fprintf(f, "%lu, %" PRId64 ", %u, %llu, %llu, %u\n",
				(unsigned long) s->time,
				s->data.val,
				io_sample_ddir(s), (unsigned long long) s->bs,
				(unsigned long long) so->offset, s->priority_bit);
+			fprintf(f, fmt,
				(unsigned long) s->time,
				s->data.val,
				io_sample_ddir(s), (unsigned long long) s->bs,
				(unsigned long long) so->offset,
				prio_val);
		}
	}
}
uint64_t time;
union io_sample_data data;
uint32_t __ddir;
- uint8_t priority_bit;
+ uint16_t priority;
uint64_t bs;
};
*/
unsigned int log_offset;
+ /*
+ * Log I/O priorities
+ */
+ unsigned int log_prio;
+
/*
* Max size of log entries before a chunk is compressed
*/
* If the upper bit is set, then we have the offset as well
*/
#define LOG_OFFSET_SAMPLE_BIT 0x80000000U
-#define io_sample_ddir(io) ((io)->__ddir & ~LOG_OFFSET_SAMPLE_BIT)
+/*
+ * If the bit following the upper bit is set, then we have the priority
+ */
+#define LOG_PRIO_SAMPLE_BIT 0x40000000U
+
+#define LOG_SAMPLE_BITS (LOG_OFFSET_SAMPLE_BIT | LOG_PRIO_SAMPLE_BIT)
+#define io_sample_ddir(io) ((io)->__ddir & ~LOG_SAMPLE_BITS)
static inline void io_sample_set_ddir(struct io_log *log,
struct io_sample *io,
int hist_coarseness;
int log_type;
int log_offset;
+ int log_prio;
int log_gz;
int log_gz_store;
int log_compress;
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "log_prio",
+ .lname = "Log priority of IO",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, log_prio),
+ .help = "Include priority value of IO for each log entry",
+ .def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
#ifdef CONFIG_ZLIB
{
.name = "log_compression",
return (ioprio_class << IOPRIO_CLASS_SHIFT) | ioprio;
}
+/*
+ * Return true if the I/O priority class encoded in the upper bits of
+ * @priority (bits above IOPRIO_CLASS_SHIFT) is the real-time class
+ * (IOPRIO_CLASS_RT).
+ */
+static inline bool ioprio_value_is_class_rt(unsigned int priority)
+{
+	return (priority >> IOPRIO_CLASS_SHIFT) == IOPRIO_CLASS_RT;
+}
+
static inline int ioprio_set(int which, int who, int ioprio_class, int ioprio)
{
return syscall(__NR_ioprio_set, which, who,
return (ioprio_class << IOPRIO_CLASS_SHIFT) | ioprio;
}
+/*
+ * Return true if the I/O priority class encoded in the upper bits of
+ * @priority (bits above IOPRIO_CLASS_SHIFT) is the real-time class
+ * (IOPRIO_CLASS_RT).
+ */
+static inline bool ioprio_value_is_class_rt(unsigned int priority)
+{
+	return (priority >> IOPRIO_CLASS_SHIFT) == IOPRIO_CLASS_RT;
+}
+
static inline int ioprio_set(int which, int who, int ioprio_class, int ioprio)
{
return syscall(__NR_ioprio_set, which, who,
extern int fio_cpus_split(os_cpu_mask_t *mask, unsigned int cpu);
#endif
+#ifndef FIO_HAVE_IOPRIO_CLASS
+#define ioprio_value_is_class_rt(prio) (false)
+#endif
#ifndef FIO_HAVE_IOPRIO
#define ioprio_value(prioclass, prio) (0)
#define ioprio_set(which, who, prioclass, prio) (0)
};
enum {
- FIO_SERVER_VER = 92,
+ FIO_SERVER_VER = 93,
FIO_SERVER_MAX_FRAGMENT_PDU = 1024,
FIO_SERVER_MAX_CMD_MB = 2048,
uint32_t log_type;
uint32_t compressed;
uint32_t log_offset;
+ uint32_t log_prio;
uint32_t log_hist_coarseness;
uint8_t name[FIO_NET_NAME_MAX];
struct io_sample samples[0];
static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
enum fio_ddir ddir, unsigned long long bs,
- unsigned long t, uint64_t offset, uint8_t priority_bit)
+ unsigned long t, uint64_t offset,
+ unsigned int priority)
{
struct io_logs *cur_log;
s->time = t + (iolog->td ? iolog->td->unix_epoch : 0);
io_sample_set_ddir(iolog, s, ddir);
s->bs = bs;
- s->priority_bit = priority_bit;
+ s->priority = priority;
if (iolog->log_offset) {
struct io_sample_offset *so = (void *) s;
}
static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
- unsigned long elapsed, bool log_max, uint8_t priority_bit)
+ unsigned long elapsed, bool log_max)
{
/*
* Note an entry in the log. Use the mean from the logged samples,
else
data.val = iolog->avg_window[ddir].mean.u.f + 0.50;
- __add_log_sample(iolog, data, ddir, 0, elapsed, 0, priority_bit);
+ __add_log_sample(iolog, data, ddir, 0, elapsed, 0, 0);
}
reset_io_stat(&iolog->avg_window[ddir]);
}
static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
- bool log_max, uint8_t priority_bit)
+ bool log_max)
{
int ddir;
for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
- __add_stat_to_log(iolog, ddir, elapsed, log_max, priority_bit);
+ __add_stat_to_log(iolog, ddir, elapsed, log_max);
}
static unsigned long add_log_sample(struct thread_data *td,
struct io_log *iolog,
union io_sample_data data,
enum fio_ddir ddir, unsigned long long bs,
- uint64_t offset, uint8_t priority_bit)
+ uint64_t offset, unsigned int ioprio)
{
unsigned long elapsed, this_window;
* If no time averaging, just add the log sample.
*/
if (!iolog->avg_msec) {
- __add_log_sample(iolog, data, ddir, bs, elapsed, offset, priority_bit);
+ __add_log_sample(iolog, data, ddir, bs, elapsed, offset,
+ ioprio);
return 0;
}
return diff;
}
- __add_stat_to_log(iolog, ddir, elapsed, td->o.log_max != 0, priority_bit);
+ __add_stat_to_log(iolog, ddir, elapsed, td->o.log_max != 0);
iolog->avg_last[ddir] = elapsed - (elapsed % iolog->avg_msec);
elapsed = mtime_since_now(&td->epoch);
if (td->clat_log && unit_logs)
- _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0);
if (td->slat_log && unit_logs)
- _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0);
if (td->lat_log && unit_logs)
- _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0);
if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
- _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0);
if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
- _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0, 0);
+ _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
}
-void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned long long bs,
- uint8_t priority_bit)
+void add_agg_sample(union io_sample_data data, enum fio_ddir ddir,
+ unsigned long long bs)
{
struct io_log *iolog;
return;
iolog = agg_io_log[ddir];
- __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0, priority_bit);
+ __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0, 0);
}
void add_sync_clat_sample(struct thread_stat *ts, unsigned long long nsec)
}
static void add_lat_percentile_sample(struct thread_stat *ts,
- unsigned long long nsec, enum fio_ddir ddir, uint8_t priority_bit,
- enum fio_lat lat)
+ unsigned long long nsec, enum fio_ddir ddir,
+ bool high_prio, enum fio_lat lat)
{
unsigned int idx = plat_val_to_idx(nsec);
add_lat_percentile_sample_noprio(ts, nsec, ddir, lat);
- if (!priority_bit)
+ if (!high_prio)
ts->io_u_plat_low_prio[ddir][idx]++;
else
ts->io_u_plat_high_prio[ddir][idx]++;
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long long nsec, unsigned long long bs,
- uint64_t offset, uint8_t priority_bit)
+ uint64_t offset, unsigned int ioprio, bool high_prio)
{
const bool needs_lock = td_async_processing(td);
unsigned long elapsed, this_window;
add_stat_sample(&ts->clat_stat[ddir], nsec);
if (!ts->lat_percentiles) {
- if (priority_bit)
+ if (high_prio)
add_stat_sample(&ts->clat_high_prio_stat[ddir], nsec);
else
add_stat_sample(&ts->clat_low_prio_stat[ddir], nsec);
if (td->clat_log)
add_log_sample(td, td->clat_log, sample_val(nsec), ddir, bs,
- offset, priority_bit);
+ offset, ioprio);
if (ts->clat_percentiles) {
if (ts->lat_percentiles)
add_lat_percentile_sample_noprio(ts, nsec, ddir, FIO_CLAT);
else
- add_lat_percentile_sample(ts, nsec, ddir, priority_bit, FIO_CLAT);
+ add_lat_percentile_sample(ts, nsec, ddir, high_prio, FIO_CLAT);
}
if (iolog && iolog->hist_msec) {
FIO_IO_U_PLAT_NR * sizeof(uint64_t));
flist_add(&dst->list, &hw->list);
__add_log_sample(iolog, sample_plat(dst), ddir, bs,
- elapsed, offset, priority_bit);
+ elapsed, offset, ioprio);
/*
* Update the last time we recorded as being now, minus
}
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
- unsigned long long nsec, unsigned long long bs, uint64_t offset,
- uint8_t priority_bit)
+ unsigned long long nsec, unsigned long long bs,
+ uint64_t offset, unsigned int ioprio)
{
const bool needs_lock = td_async_processing(td);
struct thread_stat *ts = &td->ts;
add_stat_sample(&ts->slat_stat[ddir], nsec);
if (td->slat_log)
- add_log_sample(td, td->slat_log, sample_val(nsec), ddir, bs, offset,
- priority_bit);
+ add_log_sample(td, td->slat_log, sample_val(nsec), ddir, bs,
+ offset, ioprio);
if (ts->slat_percentiles)
add_lat_percentile_sample_noprio(ts, nsec, ddir, FIO_SLAT);
void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long long nsec, unsigned long long bs,
- uint64_t offset, uint8_t priority_bit)
+ uint64_t offset, unsigned int ioprio, bool high_prio)
{
const bool needs_lock = td_async_processing(td);
struct thread_stat *ts = &td->ts;
if (td->lat_log)
add_log_sample(td, td->lat_log, sample_val(nsec), ddir, bs,
- offset, priority_bit);
+ offset, ioprio);
if (ts->lat_percentiles) {
- add_lat_percentile_sample(ts, nsec, ddir, priority_bit, FIO_LAT);
- if (priority_bit)
+ add_lat_percentile_sample(ts, nsec, ddir, high_prio, FIO_LAT);
+ if (high_prio)
add_stat_sample(&ts->clat_high_prio_stat[ddir], nsec);
else
add_stat_sample(&ts->clat_low_prio_stat[ddir], nsec);
if (td->bw_log)
add_log_sample(td, td->bw_log, sample_val(rate), io_u->ddir,
- bytes, io_u->offset, io_u_is_prio(io_u));
+ bytes, io_u->offset, io_u->ioprio);
td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir];
if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
bs = td->o.min_bs[ddir];
- next = add_log_sample(td, log, sample_val(rate), ddir, bs, 0, 0);
+ next = add_log_sample(td, log, sample_val(rate), ddir,
+ bs, 0, 0);
next_log = min(next_log, next);
}
if (td->iops_log)
add_log_sample(td, td->iops_log, sample_val(1), io_u->ddir,
- bytes, io_u->offset, io_u_is_prio(io_u));
+ bytes, io_u->offset, io_u->ioprio);
td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];
extern void clear_rusage_stat(struct thread_data *);
extern void add_lat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
- unsigned long long, uint64_t, uint8_t);
+ unsigned long long, uint64_t, unsigned int, bool);
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
- unsigned long long, uint64_t, uint8_t);
+ unsigned long long, uint64_t, unsigned int, bool);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long long,
- unsigned long long, uint64_t, uint8_t);
-extern void add_agg_sample(union io_sample_data, enum fio_ddir, unsigned long long bs,
- uint8_t priority_bit);
+ unsigned long long, uint64_t, unsigned int);
+extern void add_agg_sample(union io_sample_data, enum fio_ddir, unsigned long long);
extern void add_iops_sample(struct thread_data *, struct io_u *,
unsigned int);
extern void add_bw_sample(struct thread_data *, struct io_u *,
unsigned int ignore_zone_limits;
fio_fp64_t zrt;
fio_fp64_t zrf;
+
+ unsigned int log_prio;
};
#define FIO_TOP_STR_MAX 256
uint32_t zone_mode;
int32_t max_open_zones;
uint32_t ignore_zone_limits;
+
+ uint32_t log_prio;
} __attribute__((packed));
extern void convert_thread_options_to_cpu(struct thread_options *o, struct thread_options_pack *top);