specified period of time, reducing the resolution of the log.
Defaults to 0.
+log_offset=bool	If this is set, the iolog options will include the byte
+ offset for the IO entry as well as the other data values.
+
lockmem=int Pin down the specified amount of memory with mlock(2). Can
potentially be used instead of removing memory or booting
with less memory to simulate a smaller amount of memory.
return 0;
if (write_bw_log) {
- setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW);
- setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW);
- setup_log(&agg_io_log[DDIR_TRIM], 0, IO_LOG_TYPE_BW);
+ setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW, 0);
+ setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW, 0);
+ setup_log(&agg_io_log[DDIR_TRIM], 0, IO_LOG_TYPE_BW, 0);
}
startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
*/
nr_samples = le32_to_cpu(pdu->nr_samples);
- total = nr_samples * sizeof(struct io_sample);
+ total = nr_samples * __log_entry_sz(pdu->log_offset);
ret = malloc(total + sizeof(*pdu));
ret->nr_samples = nr_samples;
{
struct cmd_iolog_pdu *pdu = (struct cmd_iolog_pdu *) cmd->payload;
struct cmd_iolog_pdu *ret;
- int i;
+ uint64_t i;
+ void *samples;
/*
* Convert if compressed and we support it. If it's not
} else
ret = pdu;
+ ret->nr_samples = le64_to_cpu(ret->nr_samples);
ret->thread_number = le32_to_cpu(ret->thread_number);
- ret->nr_samples = le32_to_cpu(ret->nr_samples);
ret->log_type = le32_to_cpu(ret->log_type);
ret->compressed = le32_to_cpu(ret->compressed);
+ ret->log_offset = le32_to_cpu(ret->log_offset);
+	samples = &ret->samples[0];
for (i = 0; i < ret->nr_samples; i++) {
- struct io_sample *s = &ret->samples[i];
+ struct io_sample *s;
+ s = __get_sample(samples, ret->log_offset, i);
s->time = le64_to_cpu(s->time);
s->val = le64_to_cpu(s->val);
s->ddir = le32_to_cpu(s->ddir);
s->bs = le32_to_cpu(s->bs);
+
+ if (ret->log_offset) {
+ struct io_sample_offset *so = (void *) s;
+
+ so->offset = le64_to_cpu(so->offset);
+ }
}
return ret;
over the specified period of time, reducing the resolution of the log.
Defaults to 0.
.TP
+.BI log_offset \fR=\fPbool
+If this is set, the iolog options will include the byte offset for the IO
+entry as well as the other data values.
+.TP
.BI disable_lat \fR=\fPbool
Disable measurements of total latency numbers. Useful only for cutting
back the number of calls to \fBgettimeofday\fR\|(2), as that does impact performance at
goto err;
if (o->lat_log_file || write_lat_log) {
- setup_log(&td->lat_log, o->log_avg_msec, IO_LOG_TYPE_LAT);
- setup_log(&td->slat_log, o->log_avg_msec, IO_LOG_TYPE_SLAT);
- setup_log(&td->clat_log, o->log_avg_msec, IO_LOG_TYPE_CLAT);
+ setup_log(&td->lat_log, o->log_avg_msec, IO_LOG_TYPE_LAT,
+ o->log_offset);
+ setup_log(&td->slat_log, o->log_avg_msec, IO_LOG_TYPE_SLAT,
+ o->log_offset);
+ setup_log(&td->clat_log, o->log_avg_msec, IO_LOG_TYPE_CLAT,
+ o->log_offset);
}
if (o->bw_log_file || write_bw_log)
- setup_log(&td->bw_log, o->log_avg_msec, IO_LOG_TYPE_BW);
+ setup_log(&td->bw_log, o->log_avg_msec, IO_LOG_TYPE_BW,
+ o->log_offset);
if (o->iops_log_file)
- setup_log(&td->iops_log, o->log_avg_msec, IO_LOG_TYPE_IOPS);
+ setup_log(&td->iops_log, o->log_avg_msec, IO_LOG_TYPE_IOPS,
+ o->log_offset);
if (!o->name)
o->name = strdup(jobname);
unsigned long tusec;
tusec = utime_since(&io_u->start_time, &icd->time);
- add_lat_sample(td, idx, tusec, bytes);
+ add_lat_sample(td, idx, tusec, bytes, io_u->offset);
if (td->flags & TD_F_PROFILE_OPS) {
struct prof_io_ops *ops = &td->prof_io_ops;
}
if (!td->o.disable_clat) {
- add_clat_sample(td, idx, lusec, bytes);
+ add_clat_sample(td, idx, lusec, bytes, io_u->offset);
io_u_mark_latency(td, lusec);
}
unsigned long slat_time;
slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
- add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
+ add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen,
+ io_u->offset);
}
}
return ret;
}
-void setup_log(struct io_log **log, unsigned long avg_msec, int log_type)
+void setup_log(struct io_log **log, unsigned long avg_msec, int log_type,
+ int log_offset)
{
struct io_log *l = malloc(sizeof(*l));
l->nr_samples = 0;
l->max_samples = 1024;
l->log_type = log_type;
- l->log = malloc(l->max_samples * sizeof(struct io_sample));
+ l->log_offset = log_offset;
+ l->log = malloc(l->max_samples * log_entry_sz(l));
l->avg_msec = avg_msec;
*log = l;
}
void __finish_log(struct io_log *log, const char *name)
{
- unsigned int i;
+ uint64_t i;
void *buf;
FILE *f;
buf = set_file_buffer(f);
for (i = 0; i < log->nr_samples; i++) {
- fprintf(f, "%lu, %lu, %u, %u\n",
- (unsigned long) log->log[i].time,
- (unsigned long) log->log[i].val,
- log->log[i].ddir, log->log[i].bs);
+ struct io_sample *s = get_sample(log, i);
+
+ if (!log->log_offset) {
+ fprintf(f, "%lu, %lu, %u, %u\n",
+ (unsigned long) s->time,
+ (unsigned long) s->val,
+ s->ddir, s->bs);
+ } else {
+ struct io_sample_offset *so = (void *) s;
+
+ fprintf(f, "%lu, %lu, %u, %u, %llu\n",
+ (unsigned long) s->time,
+ (unsigned long) s->val,
+ s->ddir, s->bs,
+ (unsigned long long) so->offset);
+ }
}
fclose(f);
uint32_t bs;
};
+struct io_sample_offset {
+ struct io_sample s;
+ uint64_t offset;
+};
+
enum {
IO_LOG_TYPE_LAT = 1,
IO_LOG_TYPE_CLAT,
*/
unsigned long nr_samples;
unsigned long max_samples;
- struct io_sample *log;
+ void *log;
unsigned int log_type;
*/
unsigned int disabled;
+ /*
+ * Log offsets
+ */
+ unsigned int log_offset;
+
/*
* Windowed average, for logging single entries average over some
* period of time.
unsigned long avg_last;
};
+static inline size_t __log_entry_sz(int log_offset)
+{
+ if (log_offset)
+ return sizeof(struct io_sample_offset);
+ else
+ return sizeof(struct io_sample);
+}
+
+static inline size_t log_entry_sz(struct io_log *log)
+{
+ return __log_entry_sz(log->log_offset);
+}
+
+static inline struct io_sample *__get_sample(void *samples, int log_offset,
+ uint64_t sample)
+{
+ return samples + sample * __log_entry_sz(log_offset);
+}
+
+static inline struct io_sample *get_sample(struct io_log *iolog,
+ uint64_t sample)
+{
+ return __get_sample(iolog->log, iolog->log_offset, sample);
+}
+
enum {
IP_F_ONRB = 1,
IP_F_ONLIST = 2,
*/
extern void finalize_logs(struct thread_data *td);
extern void add_lat_sample(struct thread_data *, enum fio_ddir, unsigned long,
- unsigned int);
+ unsigned int, uint64_t);
extern void add_clat_sample(struct thread_data *, enum fio_ddir, unsigned long,
- unsigned int);
+ unsigned int, uint64_t);
extern void add_slat_sample(struct thread_data *, enum fio_ddir, unsigned long,
- unsigned int);
+ unsigned int, uint64_t);
extern void add_bw_sample(struct thread_data *, enum fio_ddir, unsigned int,
struct timeval *);
extern void add_iops_sample(struct thread_data *, enum fio_ddir, unsigned int,
struct timeval *);
extern void init_disk_util(struct thread_data *);
extern void update_rusage_stat(struct thread_data *);
-extern void setup_log(struct io_log **, unsigned long, int);
+extern void setup_log(struct io_log **, unsigned long, int, int);
extern void __finish_log(struct io_log *, const char *);
extern struct io_log *agg_io_log[DDIR_RWDIR_CNT];
extern int write_bw_log;
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "log_offset",
+ .lname = "Log offset of IO",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(log_offset),
+ .help = "Include offset of IO for each log entry",
+ .def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "bwavgtime",
.lname = "Bandwidth average time",
}
stream.next_in = (void *) log->log;
- stream.avail_in = log->nr_samples * sizeof(struct io_sample);
+ stream.avail_in = log->nr_samples * log_entry_sz(log);
do {
unsigned int this_len, flags = 0;
struct cmd_iolog_pdu pdu;
int i, ret = 0;
+ pdu.nr_samples = cpu_to_le64(log->nr_samples);
pdu.thread_number = cpu_to_le32(td->thread_number);
- pdu.nr_samples = __cpu_to_le32(log->nr_samples);
pdu.log_type = cpu_to_le32(log->log_type);
pdu.compressed = cpu_to_le32(use_zlib);
pdu.name[FIO_NET_NAME_MAX - 1] = '\0';
for (i = 0; i < log->nr_samples; i++) {
- struct io_sample *s = &log->log[i];
+ struct io_sample *s = get_sample(log, i);
s->time = cpu_to_le64(s->time);
s->val = cpu_to_le64(s->val);
s->ddir = cpu_to_le32(s->ddir);
s->bs = cpu_to_le32(s->bs);
+
+ if (log->log_offset) {
+ struct io_sample_offset *so = (void *) s;
+
+ so->offset = cpu_to_le64(so->offset);
+ }
}
/*
return fio_send_iolog_gz(&pdu, log);
return fio_send_cmd_ext_pdu(server_fd, FIO_NET_CMD_IOLOG, log->log,
- log->nr_samples * sizeof(struct io_sample), 0, 0);
+ log->nr_samples * log_entry_sz(log), 0, 0);
}
void fio_server_send_add_job(struct thread_data *td)
};
struct cmd_iolog_pdu {
+ uint64_t nr_samples;
uint32_t thread_number;
- uint32_t nr_samples;
uint32_t log_type;
uint32_t compressed;
+ uint32_t log_offset;
uint8_t name[FIO_NET_NAME_MAX];
struct io_sample samples[0];
};
static void __add_log_sample(struct io_log *iolog, unsigned long val,
enum fio_ddir ddir, unsigned int bs,
- unsigned long t)
+ unsigned long t, uint64_t offset)
{
const int nr_samples = iolog->nr_samples;
+ struct io_sample *s;
if (iolog->disabled)
return;
iolog->avg_last = t;
if (iolog->nr_samples == iolog->max_samples) {
- int new_size = sizeof(struct io_sample) * iolog->max_samples*2;
+ size_t new_size;
void *new_log;
+ new_size = 2 * iolog->max_samples * log_entry_sz(iolog);
new_log = realloc(iolog->log, new_size);
if (!new_log) {
log_err("fio: failed extending iolog! Will stop logging.\n");
iolog->max_samples <<= 1;
}
- iolog->log[nr_samples].val = val;
- iolog->log[nr_samples].time = t;
- iolog->log[nr_samples].ddir = ddir;
- iolog->log[nr_samples].bs = bs;
+ s = get_sample(iolog, nr_samples);
+
+ s->val = val;
+ s->time = t;
+ s->ddir = ddir;
+ s->bs = bs;
+
+ if (iolog->log_offset) {
+ struct io_sample_offset *so = (void *) s;
+
+ so->offset = offset;
+ }
+
iolog->nr_samples++;
}
unsigned long mr;
mr = iolog->avg_window[DDIR_READ].mean.u.f + 0.50;
- __add_log_sample(iolog, mr, DDIR_READ, 0, elapsed);
+ __add_log_sample(iolog, mr, DDIR_READ, 0, elapsed, 0);
}
if (iolog->avg_window[DDIR_WRITE].samples) {
unsigned long mw;
mw = iolog->avg_window[DDIR_WRITE].mean.u.f + 0.50;
- __add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed);
+ __add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed, 0);
}
if (iolog->avg_window[DDIR_TRIM].samples) {
unsigned long mw;
mw = iolog->avg_window[DDIR_TRIM].mean.u.f + 0.50;
- __add_log_sample(iolog, mw, DDIR_TRIM, 0, elapsed);
+ __add_log_sample(iolog, mw, DDIR_TRIM, 0, elapsed, 0);
}
reset_io_stat(&iolog->avg_window[DDIR_READ]);
static void add_log_sample(struct thread_data *td, struct io_log *iolog,
unsigned long val, enum fio_ddir ddir,
- unsigned int bs)
+ unsigned int bs, uint64_t offset)
{
unsigned long elapsed, this_window;
* If no time averaging, just add the log sample.
*/
if (!iolog->avg_msec) {
- __add_log_sample(iolog, val, ddir, bs, elapsed);
+ __add_log_sample(iolog, val, ddir, bs, elapsed, offset);
return;
}
return;
iolog = agg_io_log[ddir];
- __add_log_sample(iolog, val, ddir, bs, mtime_since_genesis());
+ __add_log_sample(iolog, val, ddir, bs, mtime_since_genesis(), 0);
}
static void add_clat_percentile_sample(struct thread_stat *ts,
}
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
- unsigned long usec, unsigned int bs)
+ unsigned long usec, unsigned int bs, uint64_t offset)
{
struct thread_stat *ts = &td->ts;
add_stat_sample(&ts->clat_stat[ddir], usec);
if (td->clat_log)
- add_log_sample(td, td->clat_log, usec, ddir, bs);
+ add_log_sample(td, td->clat_log, usec, ddir, bs, offset);
if (ts->clat_percentiles)
add_clat_percentile_sample(ts, usec, ddir);
}
void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
- unsigned long usec, unsigned int bs)
+ unsigned long usec, unsigned int bs, uint64_t offset)
{
struct thread_stat *ts = &td->ts;
add_stat_sample(&ts->slat_stat[ddir], usec);
if (td->slat_log)
- add_log_sample(td, td->slat_log, usec, ddir, bs);
+ add_log_sample(td, td->slat_log, usec, ddir, bs, offset);
}
void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
- unsigned long usec, unsigned int bs)
+ unsigned long usec, unsigned int bs, uint64_t offset)
{
struct thread_stat *ts = &td->ts;
add_stat_sample(&ts->lat_stat[ddir], usec);
if (td->lat_log)
- add_log_sample(td, td->lat_log, usec, ddir, bs);
+ add_log_sample(td, td->lat_log, usec, ddir, bs, offset);
}
void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs,
add_stat_sample(&ts->bw_stat[ddir], rate);
if (td->bw_log)
- add_log_sample(td, td->bw_log, rate, ddir, bs);
+ add_log_sample(td, td->bw_log, rate, ddir, bs, 0);
td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}
add_stat_sample(&ts->iops_stat[ddir], iops);
if (td->iops_log)
- add_log_sample(td, td->iops_log, iops, ddir, bs);
+ add_log_sample(td, td->iops_log, iops, ddir, bs, 0);
td->stat_io_blocks[ddir] = td->this_io_blocks[ddir];
}
unsigned long long rand_seed;
unsigned int use_os_rand;
unsigned int log_avg_msec;
+ unsigned int log_offset;
unsigned int norandommap;
unsigned int softrandommap;
unsigned int bs_unaligned;
uint64_t rand_seed;
uint32_t use_os_rand;
uint32_t log_avg_msec;
+ uint32_t log_offset;
uint32_t norandommap;
uint32_t softrandommap;
uint32_t bs_unaligned;