#include "lib/pow2.h"
#include "lib/output_buffer.h"
#include "helper_thread.h"
+#include "smalloc.h"
struct fio_mutex *stat_mutex;
if (output_format & FIO_OUTPUT_JSON) {
struct thread_data *global;
char time_buf[32];
- time_t time_p;
+ struct timeval now;
+ unsigned long long ms_since_epoch;
- time(&time_p);
- os_ctime_r((const time_t *) &time_p, time_buf,
+ gettimeofday(&now, NULL);
+ ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
+ (unsigned long long)(now.tv_usec) / 1000;
+
+ os_ctime_r((const time_t *) &now.tv_sec, time_buf,
sizeof(time_buf));
time_buf[strlen(time_buf) - 1] = '\0';
root = json_create_object();
json_object_add_value_string(root, "fio version", fio_version_string);
- json_object_add_value_int(root, "timestamp", time_p);
+ json_object_add_value_int(root, "timestamp", now.tv_sec);
+ json_object_add_value_int(root, "timestamp_ms", ms_since_epoch);
json_object_add_value_string(root, "time", time_buf);
global = get_global_options();
json_add_job_opts(root, "global options", &global->opt_list, false);
is->samples++;
}
+/*
+ * Return a struct io_logs, which is added to the tail of the log
+ * list for 'iolog'. Returns NULL if either the io_logs container or
+ * its sample array cannot be allocated; on success iolog->cur_log_max
+ * records the new chunk's capacity so the next grow doubles from it.
+ */
+static struct io_logs *get_new_log(struct io_log *iolog)
+{
+	size_t new_size, new_samples;
+	struct io_logs *cur_log;
+
+	/*
+	 * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling
+	 * forever
+	 */
+	if (!iolog->cur_log_max)
+		new_samples = DEF_LOG_ENTRIES;
+	else {
+		new_samples = iolog->cur_log_max * 2;
+		if (new_samples > MAX_LOG_ENTRIES)
+			new_samples = MAX_LOG_ENTRIES;
+	}
+
+	new_size = new_samples * log_entry_sz(iolog);
+
+	/*
+	 * NOTE(review): the container comes from smalloc() while the sample
+	 * payload comes from malloc() — presumably the list head must live in
+	 * fio's shared allocator; confirm before changing either allocator.
+	 */
+	cur_log = smalloc(sizeof(*cur_log));
+	if (cur_log) {
+		INIT_FLIST_HEAD(&cur_log->list);
+		cur_log->log = malloc(new_size);
+		if (cur_log->log) {
+			cur_log->nr_samples = 0;
+			cur_log->max_samples = new_samples;
+			flist_add_tail(&cur_log->list, &iolog->io_logs);
+			iolog->cur_log_max = new_samples;
+			return cur_log;
+		}
+		/* sample array failed: undo the container allocation */
+		sfree(cur_log);
+	}
+
+	return NULL;
+}
+
+/*
+ * Add and return a new log chunk, or return current log if big enough.
+ * Returns NULL on failure (allocation or flush error); a NULL or
+ * already-disabled iolog is marked disabled and NULL is returned.
+ * Any samples parked in iolog->pending are migrated into the new chunk.
+ */
+static struct io_logs *regrow_log(struct io_log *iolog)
+{
+	struct io_logs *cur_log;
+	int i;
+
+	if (!iolog || iolog->disabled)
+		goto disable;
+
+	cur_log = iolog_cur_log(iolog);
+	if (!cur_log) {
+		cur_log = get_new_log(iolog);
+		if (!cur_log)
+			return NULL;
+	}
+
+	if (cur_log->nr_samples < cur_log->max_samples)
+		return cur_log;
+
+	/*
+	 * No room for a new sample. If we're compressing on the fly, flush
+	 * out the current chunk
+	 */
+	if (iolog->log_gz) {
+		if (iolog_cur_flush(iolog, cur_log)) {
+			log_err("fio: failed flushing iolog! Will stop logging.\n");
+			return NULL;
+		}
+	}
+
+	/*
+	 * Get a new log array, and add to our list
+	 */
+	cur_log = get_new_log(iolog);
+	if (!cur_log) {
+		log_err("fio: failed extending iolog! Will stop logging.\n");
+		return NULL;
+	}
+
+	if (!iolog->pending || !iolog->pending->nr_samples)
+		return cur_log;
+
+	/*
+	 * Flush pending items to new log
+	 */
+	for (i = 0; i < iolog->pending->nr_samples; i++) {
+		struct io_sample *src, *dst;
+
+		src = get_sample(iolog, iolog->pending, i);
+		dst = get_sample(iolog, cur_log, i);
+		memcpy(dst, src, log_entry_sz(iolog));
+	}
+
+	/*
+	 * Account for the copied entries; get_new_log() initialized
+	 * nr_samples to 0, so without this the next append would start at
+	 * index 0 and silently overwrite the pending samples we just moved.
+	 */
+	cur_log->nr_samples = iolog->pending->nr_samples;
+
+	iolog->pending->nr_samples = 0;
+	return cur_log;
+disable:
+	if (iolog)
+		iolog->disabled = true;
+	return NULL;
+}
+
+/*
+ * Give every per-'td' latency/bw/iops log room for more samples, then
+ * clear TD_F_REGROW_LOGS. Passing a NULL log is safe: regrow_log()
+ * tolerates NULL. This is the counterpart to get_cur_log() setting the
+ * flag when it has to fall back to the pending log.
+ */
+void regrow_logs(struct thread_data *td)
+{
+	regrow_log(td->slat_log);
+	regrow_log(td->clat_log);
+	regrow_log(td->lat_log);
+	regrow_log(td->bw_log);
+	regrow_log(td->iops_log);
+	td->flags &= ~TD_F_REGROW_LOGS;
+}
+
+/*
+ * Return the io_logs chunk the next sample should be written to,
+ * growing the chunk list when the current one is full. In inline
+ * submission mode the grow is deferred: samples go to iolog->pending
+ * and TD_F_REGROW_LOGS is set so the submission side regrows later
+ * (see regrow_logs()). Returns NULL only if a needed grow fails.
+ */
+static struct io_logs *get_cur_log(struct io_log *iolog)
+{
+	struct io_logs *cur_log;
+
+	cur_log = iolog_cur_log(iolog);
+	if (!cur_log) {
+		cur_log = get_new_log(iolog);
+		if (!cur_log)
+			return NULL;
+	}
+
+	if (cur_log->nr_samples < cur_log->max_samples)
+		return cur_log;
+
+	/*
+	 * Out of space. If we're in IO offload mode, or we're not doing
+	 * per unit logging (hence logging happens outside of the IO thread
+	 * as well), add a new log chunk inline. If we're doing inline
+	 * submissions, flag 'td' as needing a log regrow and we'll take
+	 * care of it on the submission side.
+	 */
+	if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD ||
+	    !per_unit_log(iolog))
+		return regrow_log(iolog);
+
+	iolog->td->flags |= TD_F_REGROW_LOGS;
+	/* pending must have room, or samples would be silently dropped */
+	assert(iolog->pending->nr_samples < iolog->pending->max_samples);
+	return iolog->pending;
+}
+
+/*
+ * Append one sample (val, time 't', direction, block size and, when
+ * log_offset is set, the offset) to 'iolog'. Samples now live in
+ * chunks obtained via get_cur_log(); if no chunk can be provided the
+ * log is disabled permanently.
+ */
static void __add_log_sample(struct io_log *iolog, unsigned long val,
			     enum fio_ddir ddir, unsigned int bs,
			     unsigned long t, uint64_t offset)
{
-	uint64_t nr_samples = iolog->nr_samples;
-	struct io_sample *s;
+	struct io_logs *cur_log;
	if (iolog->disabled)
		return;
-
-	if (!iolog->nr_samples)
+	/* very first sample: start the averaging window at 't' */
+	if (flist_empty(&iolog->io_logs))
		iolog->avg_last = t;
-	if (iolog->nr_samples == iolog->max_samples) {
-		size_t new_size, new_samples;
-		void *new_log;
+	cur_log = get_cur_log(iolog);
+	if (cur_log) {
+		struct io_sample *s;
-		if (!iolog->max_samples)
-			new_samples = DEF_LOG_ENTRIES;
-		else
-			new_samples = iolog->max_samples * 2;
-
-		new_size = new_samples * log_entry_sz(iolog);
-
-		if (iolog->log_gz && (new_size > iolog->log_gz)) {
-			if (!iolog->log) {
-				iolog->log = malloc(new_size);
-				iolog->max_samples = new_samples;
-			} else if (iolog_flush(iolog, 0)) {
-				log_err("fio: failed flushing iolog! Will stop logging.\n");
-				iolog->disabled = 1;
-				return;
-			}
-			nr_samples = iolog->nr_samples;
-		} else {
-			new_log = realloc(iolog->log, new_size);
-			if (!new_log) {
-				log_err("fio: failed extending iolog! Will stop logging.\n");
-				iolog->disabled = 1;
-				return;
-			}
-			iolog->log = new_log;
-			iolog->max_samples = new_samples;
-		}
-	}
+		s = get_sample(iolog, cur_log, cur_log->nr_samples);
-	s = get_sample(iolog, nr_samples);
+		s->val = val;
+		s->time = t;
+		io_sample_set_ddir(iolog, s, ddir);
+		s->bs = bs;
-	s->val = val;
-	s->time = t;
-	io_sample_set_ddir(iolog, s, ddir);
-	s->bs = bs;
+		if (iolog->log_offset) {
+			struct io_sample_offset *so = (void *) s;
-	if (iolog->log_offset) {
-		struct io_sample_offset *so = (void *) s;
+			so->offset = offset;
+		}
-		so->offset = offset;
+		cur_log->nr_samples++;
+		return;
	}
-	iolog->nr_samples++;
+	/* could not get/grow a chunk: stop logging rather than crash */
+	iolog->disabled = true;
}
static inline void reset_io_stat(struct io_stat *ios)
unsigned long spent, rate;
enum fio_ddir ddir;
- if (per_unit_log(td->bw_log))
- return 0;
-
spent = mtime_since(&td->bw_sample_time, t);
if (spent < td->o.bw_avg_time &&
td->o.bw_avg_time - spent >= 10)
add_stat_sample(&ts->bw_stat[ddir], rate);
- if (td->bw_log)
- add_log_sample(td, td->bw_log, rate, ddir, 0, 0);
+ if (td->bw_log) {
+ unsigned int bs = 0;
+
+ if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
+ bs = td->o.min_bs[ddir];
+
+ add_log_sample(td, td->bw_log, rate, ddir, bs, 0);
+ }
td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}
unsigned long spent, iops;
enum fio_ddir ddir;
- if (per_unit_log(td->iops_log))
- return 0;
-
spent = mtime_since(&td->iops_sample_time, t);
if (spent < td->o.iops_avg_time &&
td->o.iops_avg_time - spent >= 10)
add_stat_sample(&ts->iops_stat[ddir], iops);
- if (td->iops_log)
- add_log_sample(td, td->iops_log, iops, ddir, 0, 0);
+ if (td->iops_log) {
+ unsigned int bs = 0;
+
+ if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
+ bs = td->o.min_bs[ddir];
+
+ add_log_sample(td, td->iops_log, iops, ddir, bs, 0);
+ }
td->stat_io_blocks[ddir] = td->this_io_blocks[ddir];
}
fio_gettime(&now, NULL);
for_each_td(td, i) {
- if (!ramp_time_over(td) ||
+ if (in_ramp_time(td) ||
!(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
next = min(td->o.iops_avg_time, td->o.bw_avg_time);
continue;