/*
 * Append one I/O record to the per-thread write iolog (version 3 format):
 * "<usecs since log start> <file> <ddir> <offset> <buflen>".
 * No-op unless the job was configured with write_iolog_file.
 */
void log_io_u(const struct thread_data *td, const struct io_u *io_u)
{
	if (!td->o.write_iolog_file)
		return;

	/*
	 * The leading timestamp is relative to io_log_start_time, set when
	 * the iolog file was opened. (A local "struct timespec now" filled
	 * via fio_gettime() was never read here, so it has been removed.)
	 */
	fprintf(td->iolog_f, "%llu %s %s %llu %llu\n",
		(unsigned long long) utime_since_now(&td->io_log_start_time),
		io_u->file->file_name, io_ddir_name(io_u->ddir), io_u->offset,
		io_u->buflen);
}
/*
 * Append a file-lifecycle record ("add"/"open"/"close") to the iolog,
 * prefixed with microseconds since the log was started. No-op if no
 * iolog file is open for this thread.
 */
void log_file(struct thread_data *td, struct fio_file *f,
	      enum file_log_act what)
{
	const char *act[] = { "add", "open", "close" };

	/* 'what' indexes act[]; anything else is a programming error */
	assert(what < 3);

	if (!td->iolog_f)
		return;

	/*
	 * Timestamp is relative to io_log_start_time; the previous local
	 * "struct timespec now" filled by fio_gettime() was unused and
	 * has been removed.
	 */
	fprintf(td->iolog_f, "%llu %s %s\n",
		(unsigned long long) utime_since_now(&td->io_log_start_time),
		f->file_name, act[what]);
}
static void iolog_delay(struct thread_data *td, unsigned long delay)
{
uint64_t usec = utime_since_now(&td->last_issue);
unsigned long orig_delay = delay;
- uint64_t this_delay;
struct timespec ts;
+ int ret = 0;
if (delay < td->time_offset) {
td->time_offset = 0;
delay -= usec;
fio_gettime(&ts, NULL);
- while (delay && !td->terminate) {
- this_delay = delay;
- if (this_delay > 500000)
- this_delay = 500000;
- usec_sleep(td, this_delay);
- delay -= this_delay;
+ while (delay && !td->terminate) {
+ ret = io_u_queued_complete(td, 0);
+ if (ret < 0)
+ td_verror(td, -ret, "io_u_queued_complete");
+ if (td->flags & TD_F_REGROW_LOGS)
+ regrow_logs(td);
+ if (utime_since_now(&ts) > delay)
+ break;
}
usec = utime_since_now(&ts);
break;
}
ret = td_io_open_file(td, f);
- if (!ret)
+ if (!ret) {
+ if (td->o.dp_type != FIO_DP_NONE) {
+ int dp_init_ret = dp_init(td);
+
+ if (dp_init_ret != 0) {
+ td_verror(td, abs(dp_init_ret), "dp_init");
+ return -1;
+ }
+ }
break;
+ }
td_verror(td, ret, "iolog open file");
return -1;
case FIO_LOG_CLOSE_FILE:
io_u->buflen, io_u->file->file_name);
if (ipo->delay)
iolog_delay(td, ipo->delay);
+
+ if (td->o.dp_type != FIO_DP_NONE)
+ dp_fill_dspec_data(td, io_u);
} else {
elapsed = mtime_since_genesis();
if (ipo->delay > elapsed)
unsigned long long offset;
unsigned int bytes;
unsigned long long delay = 0;
- int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
+ int reads, writes, trims, waits, fileno = 0, file_action = 0; /* stupid gcc */
char *rfname, *fname, *act;
char *str, *p;
enum fio_ddir rw;
rfname = fname = malloc(256+16);
act = malloc(256+16);
- syncs = reads = writes = waits = 0;
+ syncs = reads = writes = trims = waits = 0;
while ((p = fgets(str, 4096, td->io_log_rfile)) != NULL) {
struct io_piece *ipo;
int r;
*/
if (!strcmp(act, "wait"))
rw = DDIR_WAIT;
- else if (!strcmp(act, "read"))
+ else if (!strcmp(act, "read")) {
+ if (td->o.replay_skip & (1u << DDIR_READ))
+ continue;
rw = DDIR_READ;
- else if (!strcmp(act, "write"))
+ } else if (!strcmp(act, "write")) {
+ if (td->o.replay_skip & (1u << DDIR_WRITE))
+ continue;
rw = DDIR_WRITE;
- else if (!strcmp(act, "sync"))
+ } else if (!strcmp(act, "sync")) {
+ if (td->o.replay_skip & (1u << DDIR_SYNC))
+ continue;
rw = DDIR_SYNC;
- else if (!strcmp(act, "datasync"))
+ } else if (!strcmp(act, "datasync"))
rw = DDIR_DATASYNC;
- else if (!strcmp(act, "trim"))
+ else if (!strcmp(act, "trim")) {
+ if (td->o.replay_skip & (1u << DDIR_TRIM))
+ continue;
rw = DDIR_TRIM;
- else {
+ } else {
log_err("fio: bad iolog file action: %s\n",
act);
continue;
if (read_only)
continue;
writes++;
+ } else if (rw == DDIR_TRIM) {
+ /*
+ * Don't add a trim for ro mode
+ */
+ if (read_only)
+ continue;
+ trims++;
} else if (rw == DDIR_WAIT) {
if (td->o.no_stall)
continue;
{
io_u_quiesce(td);
free_io_mem(td);
- init_io_u_buffers(td);
+ if (init_io_u_buffers(td))
+ return false;
}
return true;
}
- if (!reads && !writes && !waits)
+ if (!reads && !writes && !waits && !trims)
return false;
- else if (reads && !writes)
- td->o.td_ddir = TD_DDIR_READ;
- else if (!reads && writes)
- td->o.td_ddir = TD_DDIR_WRITE;
- else
- td->o.td_ddir = TD_DDIR_RW;
+
+ td->o.td_ddir = 0;
+ if (reads)
+ td->o.td_ddir |= TD_DDIR_READ;
+ if (writes)
+ td->o.td_ddir |= TD_DDIR_WRITE;
+ if (trims)
+ td->o.td_ddir |= TD_DDIR_TRIM;
return true;
}
td->iolog_f = f;
td->iolog_buf = malloc(8192);
setvbuf(f, td->iolog_buf, _IOFBF, 8192);
+ fio_gettime(&td->io_log_start_time, NULL);
/*
* write our version line
*/
- if (fprintf(f, "%s\n", iolog_ver2) < 0) {
+ if (fprintf(f, "%s\n", iolog_ver3) < 0) {
perror("iolog init\n");
return false;
}
if (!ret)
td_verror(td, EINVAL, "failed initializing iolog");
+ init_disk_util(td);
+
return ret;
}
struct flist_head *list;
l = scalloc(1, sizeof(*l));
+ assert(l);
INIT_FLIST_HEAD(&l->io_logs);
l->log_type = p->log_type;
l->log_offset = p->log_offset;
l->log_prio = p->log_prio;
+ l->log_issue_time = p->log_issue_time;
l->log_gz = p->log_gz;
l->log_gz_store = p->log_gz_store;
l->avg_msec = p->avg_msec;
l->log_ddir_mask = LOG_OFFSET_SAMPLE_BIT;
if (l->log_prio)
l->log_ddir_mask |= LOG_PRIO_SAMPLE_BIT;
+ /*
+ * The bandwidth-log option generates agg-read_bw.log,
+ * agg-write_bw.log and agg-trim_bw.log for which l->td is NULL.
+ * Check if l->td is valid before dereferencing it.
+ */
+ if (l->td && l->td->o.log_max == IO_LOG_SAMPLE_BOTH)
+ l->log_ddir_mask |= LOG_AVG_MAX_SAMPLE_BIT;
+
+ if (l->log_issue_time)
+ l->log_ddir_mask |= LOG_ISSUE_TIME_SAMPLE_BIT;
INIT_FLIST_HEAD(&l->chunk_list);
uint64_t sample_size)
{
struct io_sample *s;
- int log_offset;
+ bool log_offset, log_issue_time;
uint64_t i, j, nr_samples;
struct io_u_plat_entry *entry, *entry_before;
uint64_t *io_u_plat;
if (!sample_size)
return;
- s = __get_sample(samples, 0, 0);
+ s = __get_sample(samples, 0, 0, 0);
log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
+ log_issue_time = (s->__ddir & LOG_ISSUE_TIME_SAMPLE_BIT) != 0;
- nr_samples = sample_size / __log_entry_sz(log_offset);
+ nr_samples = sample_size / __log_entry_sz(log_offset, log_issue_time);
for (i = 0; i < nr_samples; i++) {
- s = __get_sample(samples, log_offset, i);
+ s = __get_sample(samples, log_offset, log_issue_time, i);
entry = s->data.plat_entry;
io_u_plat = entry->io_u_plat;
}
}
/*
 * Append one formatted field group to the sample line buffer.
 *
 * *p is the current write position and *left the remaining capacity;
 * both are advanced/shrunk on success. Returns 0 on success, -1 when
 * vsnprintf fails or the output would not fit (truncation is treated
 * as an error so a partial sample line is never emitted).
 */
static int print_sample_fields(char **p, size_t *left, const char *fmt, ...) {
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = vsnprintf(*p, *left, fmt, ap);
	/*
	 * Check ret < 0 first; only then is the size_t cast safe, and it
	 * avoids a signed/unsigned comparison warning against *left.
	 */
	if (ret < 0 || (size_t)ret >= *left) {
		log_err("sample file write failed: %d\n", ret);
		va_end(ap);
		return -1;
	}
	va_end(ap);

	*p += ret;
	*left -= ret;

	return 0;
}
+
/*
 * flush_samples - Generate output for log samples
 * Each sample output is built using a temporary buffer. The buffer size
 * assumptions are:
 * - Each sample has less than 10 fields
 * - Each sample field fits in 25 characters (20 digits for 64 bit number
 *   and a few bytes delimiter)
 */
void flush_samples(FILE *f, void *samples, uint64_t sample_size)
{
	struct io_sample *s;
	bool log_offset, log_prio, log_avg_max, log_issue_time;
	uint64_t i, nr_samples;
	char buf[256];
	char *p;
	size_t left;
	int ret;

	if (!sample_size)
		return;

	/*
	 * Which optional fields are present is encoded as flag bits in the
	 * __ddir of the first sample; all samples in the buffer share the
	 * same layout.
	 */
	s = __get_sample(samples, 0, 0, 0);
	log_offset = (s->__ddir & LOG_OFFSET_SAMPLE_BIT) != 0;
	log_prio = (s->__ddir & LOG_PRIO_SAMPLE_BIT) != 0;
	log_avg_max = (s->__ddir & LOG_AVG_MAX_SAMPLE_BIT) != 0;
	log_issue_time = (s->__ddir & LOG_ISSUE_TIME_SAMPLE_BIT) != 0;

	/* Entry size depends on which aux fields are stored per sample. */
	nr_samples = sample_size / __log_entry_sz(log_offset, log_issue_time);

	for (i = 0; i < nr_samples; i++) {
		s = __get_sample(samples, log_offset, log_issue_time, i);
		p = buf;
		left = sizeof(buf);

		/* Mandatory leading fields: timestamp and primary value. */
		ret = print_sample_fields(&p, &left, "%" PRIu64 ", %" PRId64,
					  s->time, s->data.val.val0);
		if (ret)
			return;

		/* With log_max=both, val1 carries the max alongside the avg. */
		if (log_avg_max) {
			ret = print_sample_fields(&p, &left, ", %" PRId64,
						  s->data.val.val1);
			if (ret)
				return;
		}

		ret = print_sample_fields(&p, &left, ", %u, %llu",
					  io_sample_ddir(s),
					  (unsigned long long) s->bs);
		if (ret)
			return;

		if (log_offset) {
			ret = print_sample_fields(&p, &left, ", %llu",
					(unsigned long long) s->aux[IOS_AUX_OFFSET_INDEX]);
			if (ret)
				return;
		}

		/*
		 * With log_prio the raw priority is printed in hex;
		 * otherwise only a boolean "is RT class" flag is emitted.
		 */
		if (log_prio)
			ret = print_sample_fields(&p, &left, ", 0x%04x",
					s->priority);
		else
			ret = print_sample_fields(&p, &left, ", %u",
					ioprio_value_is_class_rt(s->priority));
		if (ret)
			return;

		if (log_issue_time) {
			ret = print_sample_fields(&p, &left, ", %llu",
					(unsigned long long) s->aux[IOS_AUX_ISSUE_TIME_INDEX]);
			if (ret)
				return;
		}

		/* Any formatting failure above aborts the whole flush. */
		fprintf(f, "%s\n", buf);
	}
}
void *buf;
FILE *f;
- f = fopen(file, "r");
+ f = fopen(file, "rb");
if (!f) {
perror("fopen");
return 1;
void *buf;
FILE *f;
+	/*
+	 * If log_gz_store is true, we are writing a binary file.
+	 * Set the mode appropriately (on all platforms) to avoid issues
+	 * on Windows (line-ending conversions, etc.).
+	 */
if (!do_append)
- f = fopen(log->filename, "w");
+ if (log->log_gz_store)
+ f = fopen(log->filename, "wb");
+ else
+ f = fopen(log->filename, "w");
else
- f = fopen(log->filename, "a");
+ if (log->log_gz_store)
+ f = fopen(log->filename, "ab");
+ else
+ f = fopen(log->filename, "a");
if (!f) {
perror("fopen log");
return;
* Queue work item to compress the existing log entries. We reset the
* current log to a small size, and reference the existing log in the
* data that we queue for compression. Once compression has been done,
- * this old log is freed. If called with finish == true, will not return
- * until the log compression has completed, and will flush all previous
- * logs too
+ * this old log is freed. Will not return until the log compression
+ * has completed, and will flush all previous logs too
*/
static int iolog_flush(struct io_log *log)
{
struct iolog_flush_data *data;
+ workqueue_flush(&log->td->log_compress_wq);
data = malloc(sizeof(*data));
if (!data)
return 1;
/*
 * Write out the accumulated logs for every thread.
 * for_each_td()/end_for_each() are project iteration macros over all
 * thread_data entries; the unit_logs flag is passed straight through to
 * td_writeout_logs() for each of them.
 */
void fio_writeout_logs(bool unit_logs)
{
	for_each_td(td) {
		td_writeout_logs(td, unit_logs);
	} end_for_each();
}