if (output_format & FIO_OUTPUT_JSON) {
struct thread_data *global;
char time_buf[32];
- time_t time_p;
+ struct timeval now;
+ unsigned long long ms_since_epoch;
- time(&time_p);
- os_ctime_r((const time_t *) &time_p, time_buf,
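+ /*
+ * Sample the clock once, then derive both the second and the
+ * millisecond resolution timestamps from that one reading
+ */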
+ gettimeofday(&now, NULL);
+ ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
+ (unsigned long long)(now.tv_usec) / 1000;
+
+ os_ctime_r((const time_t *) &now.tv_sec, time_buf,
sizeof(time_buf));
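+ /* chop the trailing newline that the ctime-style conversion appends */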
time_buf[strlen(time_buf) - 1] = '\0';
root = json_create_object();
json_object_add_value_string(root, "fio version", fio_version_string);
- json_object_add_value_int(root, "timestamp", time_p);
+ json_object_add_value_int(root, "timestamp", now.tv_sec);
+ json_object_add_value_int(root, "timestamp_ms", ms_since_epoch);
json_object_add_value_string(root, "time", time_buf);
global = get_global_options();
json_add_job_opts(root, "global options", &global->opt_list, false);
new_samples = MAX_LOG_ENTRIES;
}
- /*
- * If the alloc size is sufficiently large, quiesce pending IO before
- * attempting it. This is to avoid spending a long time in alloc with
- * IO pending, which will unfairly skew the completion latencies of
- * inflight IO.
- */
new_size = new_samples * log_entry_sz(iolog);
- if (new_size >= LOG_QUIESCE_SZ)
- io_u_quiesce(iolog->td);
cur_log = malloc(sizeof(*cur_log));
if (!cur_log) {
return NULL;
}
-static struct io_logs *get_cur_log(struct io_log *iolog)
+/*
+ * Add and return a new log chunk, or return current log if big enough
+ */
+static struct io_logs *regrow_log(struct io_log *iolog)
{
struct io_logs *cur_log;
+ int i;
+
+ if (!iolog || iolog->disabled)
+ goto disable;
cur_log = iolog_cur_log(iolog);
if (!cur_log) {
+ cur_log = get_new_log(iolog);
+ if (!cur_log)
return NULL;
}
+
+ if (cur_log->nr_samples < cur_log->max_samples)
+ return cur_log;
+
+ /*
+ * Current chunk is full, add a new one to copy any pending samples into
+ */
+ cur_log = get_new_log(iolog);
+ if (!cur_log)
+ return NULL;
+
+ if (!iolog->pending || !iolog->pending->nr_samples)
+ return cur_log;
+
+ /*
+ * Flush pending items to new log
+ */
+ for (i = 0; i < iolog->pending->nr_samples; i++) {
+ struct io_sample *src, *dst;
+
+ src = get_sample(iolog, iolog->pending, i);
+ dst = get_sample(iolog, cur_log, i);
+ memcpy(dst, src, log_entry_sz(iolog));
+ }
+ cur_log->nr_samples = iolog->pending->nr_samples;
+
+ iolog->pending->nr_samples = 0;
return cur_log;
+disable:
+ if (iolog)
+ iolog->disabled = true;
+ return NULL;
+}
+
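+/*
+ * Grow all logs the IO thread flagged as pending, then clear the regrow
+ * request on the thread
+ */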
+void regrow_logs(struct thread_data *td)
+{
+ regrow_log(td->slat_log);
+ regrow_log(td->clat_log);
+ regrow_log(td->lat_log);
+ regrow_log(td->bw_log);
+ regrow_log(td->iops_log);
+ td->flags &= ~TD_F_REGROW_LOGS;
+}
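+
+/*
+ * A minimal sketch of the intended call site (not part of this hunk):
+ * the thread owning 'td' checks the flag between submissions, e.g.
+ *
+ * if (td->flags & TD_F_REGROW_LOGS)
+ * regrow_logs(td);
+ */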
+
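+/*
+ * Return a log chunk with room for at least one more sample. If the
+ * current chunk is full, grow it inline in offload mode, otherwise
+ * direct the sample to the pending log and leave the grow to the
+ * submitting thread
+ */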
+static struct io_logs *get_cur_log(struct io_log *iolog)
+{
+ struct io_logs *cur_log;
+
+ cur_log = iolog_cur_log(iolog);
+ if (!cur_log) {
+ cur_log = get_new_log(iolog);
+ if (!cur_log)
+ return NULL;
+ }
+
+ if (cur_log->nr_samples < cur_log->max_samples)
+ return cur_log;
+
+ /*
+ * Out of space. If we're in IO offload mode, add a new log chunk
+ * inline. If we're doing inline submissions, flag 'td' as needing
+ * a log regrow and we'll take care of it on the submission side.
+ */
+ if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD)
+ return regrow_log(iolog);
+
+ iolog->td->flags |= TD_F_REGROW_LOGS;
+ assert(iolog->pending->nr_samples < iolog->pending->max_samples);
+ return iolog->pending;
}
static void __add_log_sample(struct io_log *iolog, unsigned long val,
fio_gettime(&now, NULL);
for_each_td(td, i) {
- if (!ramp_time_over(td) ||
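+ /* skip threads still in their ramp period or not actively running IO */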
+ if (in_ramp_time(td) ||
!(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
next = min(td->o.iops_avg_time, td->o.bw_avg_time);
continue;