#include "idletime.h"
#include "lib/pow2.h"
#include "lib/output_buffer.h"
+#include "helper_thread.h"
+#include "smalloc.h"
+
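+/*
+ * Slack, in msec, for the windowed log timers: take an averaged sample
+ * up to this much early rather than scheduling another wakeup for it.
+ */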
+#define LOG_MSEC_SLACK 10
struct fio_mutex *stat_mutex;
log_buf(out, "\n");
}
-static void json_add_job_opts(struct json_object *root, const char *name,
- struct flist_head *opt_list, bool num_jobs)
+void json_add_job_opts(struct json_object *root, const char *name,
+ struct flist_head *opt_list, bool num_jobs)
{
struct json_object *dir_object;
struct flist_head *entry;
}
}
+	/* steady state detection; should this only be emitted for json+ output? */
+ if (ts->ss) {
+ struct json_object *data;
+ struct json_array *iops, *bw;
+ struct steadystate_data *ss = ts->ss;
+ unsigned long long sum_iops, sum_bw;
+ double mean_iops, mean_bw;
+ int i, j, k;
+ char ss_buf[64];
+
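+		/* e.g. "iops:2.500000%" or "bw_slope:12.000000" */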
+ snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
+ ss->check_iops ? "iops" : "bw",
+ ss->check_slope ? "_slope" : "",
+ (float) ss->limit,
+ ss->pct ? "%" : "");
+
+ tmp = json_create_object();
+ json_object_add_value_object(root, "steadystate", tmp);
+ json_object_add_value_string(tmp, "ss", ss_buf);
+ json_object_add_value_int(tmp, "duration", (int)ss->dur);
+ json_object_add_value_int(tmp, "steadystate_ramptime", ss->ramp_time / 1000000L);
+ json_object_add_value_int(tmp, "attained", ss->attained);
+
+ snprintf(ss_buf, sizeof(ss_buf), "%f%s", (float) ss->criterion, ss->pct ? "%" : "");
+ json_object_add_value_string(tmp, "criterion", ss_buf);
+ json_object_add_value_float(tmp, "max_deviation", ss->deviation);
+ json_object_add_value_float(tmp, "slope", ss->slope);
+
+ data = json_create_object();
+ json_object_add_value_object(tmp, "data", data);
+ bw = json_create_array();
+ iops = json_create_array();
+
+		/*
+		 * If ss was attained or the buffer is not full, ss->head
+		 * points to the first element in the list; otherwise it
+		 * points to the second element.
+		 */
+ if (ss->attained || ss->sum_y == 0)
+ j = ss->head;
+ else
+ j = ss->head == 0 ? ss->dur - 1 : ss->head - 1;
+ for (i = 0, sum_iops = 0, sum_bw = 0; i < ss->dur; i++) {
+ k = (j + i) % ss->dur;
+ sum_bw += ss->bw_data[k];
+ sum_iops += ss->iops_data[k];
+ json_array_add_value_int(bw, ss->bw_data[k]);
+ json_array_add_value_int(iops, ss->iops_data[k]);
+ }
+ mean_bw = (double) sum_bw / ss->dur;
+ mean_iops = (double) sum_iops / ss->dur;
+ json_object_add_value_float(data, "bw_mean", mean_bw);
+ json_object_add_value_float(data, "iops_mean", mean_iops);
+ json_object_add_value_array(data, "iops", iops);
+ json_object_add_value_array(data, "bw", bw);
+ }
+
return root;
}
struct json_object *show_thread_status(struct thread_stat *ts,
struct group_run_stats *rs,
+ struct flist_head *opt_list,
struct buf_output *out)
{
struct json_object *ret = NULL;
if (output_format & FIO_OUTPUT_TERSE)
show_thread_status_terse(ts, rs, out);
if (output_format & FIO_OUTPUT_JSON)
- ret = show_thread_status_json(ts, rs, NULL);
+ ret = show_thread_status_json(ts, rs, opt_list);
if (output_format & FIO_OUTPUT_NORMAL)
show_thread_status_normal(ts, rs, out);
ts->block_infos[k] = td->ts.block_infos[k];
sum_thread_stats(ts, &td->ts, idx == 1);
+
+ if (td->o.ss_dur)
+ ts->ss = &td->ss;
+ else
+ ts->ss = NULL;
}
for (i = 0; i < nr_ts; i++) {
unsigned long long bw;
ts = &threadstats[i];
+ if (ts->groupid == -1)
+ continue;
rs = &runstats[ts->groupid];
rs->kb_base = ts->kb_base;
rs->unit_base = ts->unit_base;
if (output_format & FIO_OUTPUT_JSON) {
struct thread_data *global;
char time_buf[32];
- time_t time_p;
+ struct timeval now;
+ unsigned long long ms_since_epoch;
+
+ gettimeofday(&now, NULL);
+ ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 +
+ (unsigned long long)(now.tv_usec) / 1000;
- time(&time_p);
- os_ctime_r((const time_t *) &time_p, time_buf,
+ os_ctime_r((const time_t *) &now.tv_sec, time_buf,
sizeof(time_buf));
time_buf[strlen(time_buf) - 1] = '\0';
root = json_create_object();
json_object_add_value_string(root, "fio version", fio_version_string);
- json_object_add_value_int(root, "timestamp", time_p);
+ json_object_add_value_int(root, "timestamp", now.tv_sec);
+ json_object_add_value_int(root, "timestamp_ms", ms_since_epoch);
json_object_add_value_string(root, "time", time_buf);
global = get_global_options();
json_add_job_opts(root, "global options", &global->opt_list, false);
json_object_add_value_array(root, "jobs", array);
}
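+	/* -1U: these are the global options, not tied to any job index */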
+ if (is_backend)
+ fio_server_send_job_options(&get_global_options()->opt_list, -1U);
+
for (i = 0; i < nr_ts; i++) {
ts = &threadstats[i];
rs = &runstats[ts->groupid];
- if (is_backend)
+ if (is_backend) {
+ fio_server_send_job_options(opt_lists[i], i);
fio_server_send_ts(ts, rs);
- else {
+ } else {
if (output_format & FIO_OUTPUT_TERSE)
show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]);
if (output_format & FIO_OUTPUT_JSON) {
fio_gettime(&tv, NULL);
for_each_td(td, i) {
- rt[i] = mtime_since(&td->start, &tv);
- if (td_read(td) && td->io_bytes[DDIR_READ])
- td->ts.runtime[DDIR_READ] += rt[i];
- if (td_write(td) && td->io_bytes[DDIR_WRITE])
- td->ts.runtime[DDIR_WRITE] += rt[i];
- if (td_trim(td) && td->io_bytes[DDIR_TRIM])
- td->ts.runtime[DDIR_TRIM] += rt[i];
-
td->update_rusage = 1;
td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];
td->ts.total_run_time = mtime_since(&td->epoch, &tv);
+
+ rt[i] = mtime_since(&td->start, &tv);
+ if (td_read(td) && td->ts.io_bytes[DDIR_READ])
+ td->ts.runtime[DDIR_READ] += rt[i];
+ if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
+ td->ts.runtime[DDIR_WRITE] += rt[i];
+ if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
+ td->ts.runtime[DDIR_TRIM] += rt[i];
}
for_each_td(td, i) {
__show_run_stats();
for_each_td(td, i) {
- if (td_read(td) && td->io_bytes[DDIR_READ])
+ if (td_read(td) && td->ts.io_bytes[DDIR_READ])
td->ts.runtime[DDIR_READ] -= rt[i];
- if (td_write(td) && td->io_bytes[DDIR_WRITE])
+ if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
td->ts.runtime[DDIR_WRITE] -= rt[i];
- if (td_trim(td) && td->io_bytes[DDIR_TRIM])
+ if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
td->ts.runtime[DDIR_TRIM] -= rt[i];
}
is->samples++;
}
+/*
+ * Return a struct io_logs, which is added to the tail of the log
+ * list for 'iolog'.
+ */
+static struct io_logs *get_new_log(struct io_log *iolog)
+{
+ size_t new_size, new_samples;
+ struct io_logs *cur_log;
+
+ /*
+ * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling
+ * forever
+ */
+ if (!iolog->cur_log_max)
+ new_samples = DEF_LOG_ENTRIES;
+ else {
+ new_samples = iolog->cur_log_max * 2;
+ if (new_samples > MAX_LOG_ENTRIES)
+ new_samples = MAX_LOG_ENTRIES;
+ }
+
+ new_size = new_samples * log_entry_sz(iolog);
+
+ cur_log = smalloc(sizeof(*cur_log));
+ if (cur_log) {
+ INIT_FLIST_HEAD(&cur_log->list);
+ cur_log->log = malloc(new_size);
+ if (cur_log->log) {
+ cur_log->nr_samples = 0;
+ cur_log->max_samples = new_samples;
+ flist_add_tail(&cur_log->list, &iolog->io_logs);
+ iolog->cur_log_max = new_samples;
+ return cur_log;
+ }
+ sfree(cur_log);
+ }
+
+ return NULL;
+}
+
+/*
+ * Add and return a new log chunk, or return current log if big enough
+ */
+static struct io_logs *regrow_log(struct io_log *iolog)
+{
+ struct io_logs *cur_log;
+ int i;
+
+ if (!iolog || iolog->disabled)
+ goto disable;
+
+ cur_log = iolog_cur_log(iolog);
+ if (!cur_log) {
+ cur_log = get_new_log(iolog);
+ if (!cur_log)
+ return NULL;
+ }
+
+ if (cur_log->nr_samples < cur_log->max_samples)
+ return cur_log;
+
+ /*
+ * No room for a new sample. If we're compressing on the fly, flush
+ * out the current chunk
+ */
+ if (iolog->log_gz) {
+ if (iolog_cur_flush(iolog, cur_log)) {
+ log_err("fio: failed flushing iolog! Will stop logging.\n");
+ return NULL;
+ }
+ }
+
+ /*
+ * Get a new log array, and add to our list
+ */
+ cur_log = get_new_log(iolog);
+ if (!cur_log) {
+ log_err("fio: failed extending iolog! Will stop logging.\n");
+ return NULL;
+ }
+
+ if (!iolog->pending || !iolog->pending->nr_samples)
+ return cur_log;
+
+ /*
+ * Flush pending items to new log
+ */
+ for (i = 0; i < iolog->pending->nr_samples; i++) {
+ struct io_sample *src, *dst;
+
+ src = get_sample(iolog, iolog->pending, i);
+ dst = get_sample(iolog, cur_log, i);
+ memcpy(dst, src, log_entry_sz(iolog));
+ }
+ cur_log->nr_samples = iolog->pending->nr_samples;
+
+ iolog->pending->nr_samples = 0;
+ return cur_log;
+disable:
+ if (iolog)
+ iolog->disabled = true;
+ return NULL;
+}
+
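+/*
+ * Grow every configured log for 'td'. Runs on the submission side once
+ * get_cur_log() has flagged TD_F_REGROW_LOGS.
+ */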
+void regrow_logs(struct thread_data *td)
+{
+ regrow_log(td->slat_log);
+ regrow_log(td->clat_log);
+ regrow_log(td->clat_hist_log);
+ regrow_log(td->lat_log);
+ regrow_log(td->bw_log);
+ regrow_log(td->iops_log);
+ td->flags &= ~TD_F_REGROW_LOGS;
+}
+
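+/*
+ * Return the log chunk to stuff the next sample into: grow inline when
+ * it's safe to do so here, otherwise hand back the pending chunk and
+ * defer the regrow to the submission side.
+ */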
+static struct io_logs *get_cur_log(struct io_log *iolog)
+{
+ struct io_logs *cur_log;
+
+ cur_log = iolog_cur_log(iolog);
+ if (!cur_log) {
+ cur_log = get_new_log(iolog);
+ if (!cur_log)
+ return NULL;
+ }
+
+ if (cur_log->nr_samples < cur_log->max_samples)
+ return cur_log;
+
+ /*
+ * Out of space. If we're in IO offload mode, or we're not doing
+ * per unit logging (hence logging happens outside of the IO thread
+ * as well), add a new log chunk inline. If we're doing inline
+ * submissions, flag 'td' as needing a log regrow and we'll take
+ * care of it on the submission side.
+ */
+ if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD ||
+ !per_unit_log(iolog))
+ return regrow_log(iolog);
+
+ iolog->td->flags |= TD_F_REGROW_LOGS;
+ assert(iolog->pending->nr_samples < iolog->pending->max_samples);
+ return iolog->pending;
+}
+
static void __add_log_sample(struct io_log *iolog, unsigned long val,
enum fio_ddir ddir, unsigned int bs,
unsigned long t, uint64_t offset)
{
- uint64_t nr_samples = iolog->nr_samples;
- struct io_sample *s;
+ struct io_logs *cur_log;
if (iolog->disabled)
return;
-
- if (!iolog->nr_samples)
+ if (flist_empty(&iolog->io_logs))
iolog->avg_last = t;
- if (iolog->nr_samples == iolog->max_samples) {
- size_t new_size;
- void *new_log;
+ cur_log = get_cur_log(iolog);
+ if (cur_log) {
+ struct io_sample *s;
- new_size = 2 * iolog->max_samples * log_entry_sz(iolog);
+ s = get_sample(iolog, cur_log, cur_log->nr_samples);
- if (iolog->log_gz && (new_size > iolog->log_gz)) {
- if (iolog_flush(iolog, 0)) {
- log_err("fio: failed flushing iolog! Will stop logging.\n");
- iolog->disabled = 1;
- return;
- }
- nr_samples = iolog->nr_samples;
- } else {
- new_log = realloc(iolog->log, new_size);
- if (!new_log) {
- log_err("fio: failed extending iolog! Will stop logging.\n");
- iolog->disabled = 1;
- return;
- }
- iolog->log = new_log;
- iolog->max_samples <<= 1;
- }
- }
+ s->val = val;
+ s->time = t;
+ io_sample_set_ddir(iolog, s, ddir);
+ s->bs = bs;
- s = get_sample(iolog, nr_samples);
+ if (iolog->log_offset) {
+ struct io_sample_offset *so = (void *) s;
- s->val = val;
- s->time = t;
- io_sample_set_ddir(iolog, s, ddir);
- s->bs = bs;
-
- if (iolog->log_offset) {
- struct io_sample_offset *so = (void *) s;
+ so->offset = offset;
+ }
- so->offset = offset;
+ cur_log->nr_samples++;
+ return;
}
- iolog->nr_samples++;
+ iolog->disabled = true;
}
static inline void reset_io_stat(struct io_stat *ios)
}
}
-static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed)
+static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
+ unsigned long elapsed, bool log_max)
{
/*
* Note an entry in the log. Use the mean from the logged samples,
* making sure to properly round up. Only write a log entry if we
* had actual samples done.
*/
- if (iolog->avg_window[DDIR_READ].samples) {
- unsigned long mr;
+ if (iolog->avg_window[ddir].samples) {
+ unsigned long val;
- mr = iolog->avg_window[DDIR_READ].mean.u.f + 0.50;
- __add_log_sample(iolog, mr, DDIR_READ, 0, elapsed, 0);
- }
- if (iolog->avg_window[DDIR_WRITE].samples) {
- unsigned long mw;
+ if (log_max)
+ val = iolog->avg_window[ddir].max_val;
+ else
+ val = iolog->avg_window[ddir].mean.u.f + 0.50;
- mw = iolog->avg_window[DDIR_WRITE].mean.u.f + 0.50;
- __add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed, 0);
+ __add_log_sample(iolog, val, ddir, 0, elapsed, 0);
}
- if (iolog->avg_window[DDIR_TRIM].samples) {
- unsigned long mw;
- mw = iolog->avg_window[DDIR_TRIM].mean.u.f + 0.50;
- __add_log_sample(iolog, mw, DDIR_TRIM, 0, elapsed, 0);
- }
+ reset_io_stat(&iolog->avg_window[ddir]);
+}
- reset_io_stat(&iolog->avg_window[DDIR_READ]);
- reset_io_stat(&iolog->avg_window[DDIR_WRITE]);
- reset_io_stat(&iolog->avg_window[DDIR_TRIM]);
+static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
+ bool log_max)
+{
+ int ddir;
+
+ for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++)
+ __add_stat_to_log(iolog, ddir, elapsed, log_max);
}
-static void add_log_sample(struct thread_data *td, struct io_log *iolog,
+static long add_log_sample(struct thread_data *td, struct io_log *iolog,
unsigned long val, enum fio_ddir ddir,
unsigned int bs, uint64_t offset)
{
unsigned long elapsed, this_window;
if (!ddir_rw(ddir))
- return;
+ return 0;
elapsed = mtime_since_now(&td->epoch);
*/
if (!iolog->avg_msec) {
__add_log_sample(iolog, val, ddir, bs, elapsed, offset);
- return;
+ return 0;
}
/*
* need to do.
*/
this_window = elapsed - iolog->avg_last;
- if (this_window < iolog->avg_msec)
- return;
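+	/*
+	 * Window not done yet: tell the caller how long until it is.
+	 * Non-inline (helper thread serviced) logs may fire up to
+	 * LOG_MSEC_SLACK msec early instead of waiting for another wakeup.
+	 */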
+ if (elapsed < iolog->avg_last)
+ return iolog->avg_last - elapsed;
+ else if (this_window < iolog->avg_msec) {
+ int diff = iolog->avg_msec - this_window;
+
+ if (inline_log(iolog) || diff > LOG_MSEC_SLACK)
+ return diff;
+ }
- _add_stat_to_log(iolog, elapsed);
+ _add_stat_to_log(iolog, elapsed, td->o.log_max != 0);
- iolog->avg_last = elapsed;
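+	/*
+	 * Pull the window start back by however much we overshot, so the
+	 * windows stay aligned to avg_msec boundaries.
+	 */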
+ iolog->avg_last = elapsed - (this_window - iolog->avg_msec);
+ return iolog->avg_msec;
}
-void finalize_logs(struct thread_data *td)
+void finalize_logs(struct thread_data *td, bool unit_logs)
{
unsigned long elapsed;
elapsed = mtime_since_now(&td->epoch);
- if (td->clat_log)
- _add_stat_to_log(td->clat_log, elapsed);
- if (td->slat_log)
- _add_stat_to_log(td->slat_log, elapsed);
- if (td->lat_log)
- _add_stat_to_log(td->lat_log, elapsed);
- if (td->bw_log)
- _add_stat_to_log(td->bw_log, elapsed);
- if (td->iops_log)
- _add_stat_to_log(td->iops_log, elapsed);
+ if (td->clat_log && unit_logs)
+ _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0);
+ if (td->slat_log && unit_logs)
+ _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0);
+ if (td->lat_log && unit_logs)
+ _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0);
+ if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
+ _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0);
+ if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
+ _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
}
void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs)
void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
unsigned long usec, unsigned int bs, uint64_t offset)
{
+ unsigned long elapsed, this_window;
struct thread_stat *ts = &td->ts;
-
- if (!ddir_rw(ddir))
- return;
+ struct io_log *iolog = td->clat_hist_log;
td_io_u_lock(td);
if (ts->clat_percentiles)
add_clat_percentile_sample(ts, usec, ddir);
+ if (iolog && iolog->hist_msec) {
+ struct io_hist *hw = &iolog->hist_window[ddir];
+
+ hw->samples++;
+ elapsed = mtime_since_now(&td->epoch);
+ if (!hw->hist_last)
+ hw->hist_last = elapsed;
+ this_window = elapsed - hw->hist_last;
+
+ if (this_window >= iolog->hist_msec) {
+ unsigned int *io_u_plat;
+ unsigned int *dst;
+
+ /*
+ * Make a byte-for-byte copy of the latency histogram
+ * stored in td->ts.io_u_plat[ddir], recording it in a
+ * log sample. Note that the matching call to free() is
+ * located in iolog.c after printing this sample to the
+ * log file.
+ */
+ io_u_plat = (unsigned int *) td->ts.io_u_plat[ddir];
+ dst = malloc(FIO_IO_U_PLAT_NR * sizeof(unsigned int));
+ memcpy(dst, io_u_plat,
+ FIO_IO_U_PLAT_NR * sizeof(unsigned int));
+			__add_log_sample(iolog, (unsigned long) dst, ddir, bs,
+ elapsed, offset);
+
+ /*
+ * Update the last time we recorded as being now, minus
+ * any drift in time we encountered before actually
+ * making the record.
+ */
+ hw->hist_last = elapsed - (this_window - iolog->hist_msec);
+ hw->samples = 0;
+ }
+ }
+
td_io_u_unlock(td);
}
td_io_u_unlock(td);
}
-void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs,
- struct timeval *t)
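+/*
+ * Per-IO bandwidth sample: log the average rate at which this IO of
+ * 'bytes' bytes completed, given that it took 'spent' to finish.
+ */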
+void add_bw_sample(struct thread_data *td, struct io_u *io_u,
+ unsigned int bytes, unsigned long spent)
+{
+ struct thread_stat *ts = &td->ts;
+ unsigned long rate;
+
+ if (spent)
+ rate = bytes * 1000 / spent;
+ else
+ rate = 0;
+
+ td_io_u_lock(td);
+
+ add_stat_sample(&ts->bw_stat[io_u->ddir], rate);
+
+ if (td->bw_log)
+ add_log_sample(td, td->bw_log, rate, io_u->ddir, bytes, io_u->offset);
+
+ td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir];
+ td_io_u_unlock(td);
+}
+
+static int add_bw_samples(struct thread_data *td, struct timeval *t)
{
struct thread_stat *ts = &td->ts;
unsigned long spent, rate;
+ enum fio_ddir ddir;
+ unsigned int next, next_log;
- if (!ddir_rw(ddir))
- return;
+ next_log = td->o.bw_avg_time;
spent = mtime_since(&td->bw_sample_time, t);
- if (spent < td->o.bw_avg_time)
- return;
+ if (spent < td->o.bw_avg_time &&
+ td->o.bw_avg_time - spent >= LOG_MSEC_SLACK)
+ return td->o.bw_avg_time - spent;
td_io_u_lock(td);
add_stat_sample(&ts->bw_stat[ddir], rate);
- if (td->bw_log)
- add_log_sample(td, td->bw_log, rate, ddir, bs, 0);
+ if (td->bw_log) {
+ unsigned int bs = 0;
+
+ if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
+ bs = td->o.min_bs[ddir];
+
+ next = add_log_sample(td, td->bw_log, rate, ddir, bs, 0);
+ next_log = min(next_log, next);
+ }
td->stat_io_bytes[ddir] = td->this_io_bytes[ddir];
}
- fio_gettime(&td->bw_sample_time, NULL);
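+	/*
+	 * Advance the sample time by the averaging period instead of
+	 * re-reading the clock, so the sampling windows don't drift.
+	 */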
+ timeval_add_msec(&td->bw_sample_time, td->o.bw_avg_time);
+
+ td_io_u_unlock(td);
+
+ if (spent <= td->o.bw_avg_time)
+ return min(next_log, td->o.bw_avg_time);
+
+ next = td->o.bw_avg_time - (1 + spent - td->o.bw_avg_time);
+ return min(next, next_log);
+}
+
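+/*
+ * Per-IO IOPS sample: each completed IO counts as one.
+ */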
+void add_iops_sample(struct thread_data *td, struct io_u *io_u,
+ unsigned int bytes)
+{
+ struct thread_stat *ts = &td->ts;
+
+ td_io_u_lock(td);
+
+ add_stat_sample(&ts->iops_stat[io_u->ddir], 1);
+
+ if (td->iops_log)
+ add_log_sample(td, td->iops_log, 1, io_u->ddir, bytes, io_u->offset);
+
+ td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir];
td_io_u_unlock(td);
}
-void add_iops_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs,
- struct timeval *t)
+static int add_iops_samples(struct thread_data *td, struct timeval *t)
{
struct thread_stat *ts = &td->ts;
unsigned long spent, iops;
+ enum fio_ddir ddir;
+ unsigned int next, next_log;
- if (!ddir_rw(ddir))
- return;
+ next_log = td->o.iops_avg_time;
spent = mtime_since(&td->iops_sample_time, t);
- if (spent < td->o.iops_avg_time)
- return;
+ if (spent < td->o.iops_avg_time &&
+ td->o.iops_avg_time - spent >= LOG_MSEC_SLACK)
+ return td->o.iops_avg_time - spent;
td_io_u_lock(td);
add_stat_sample(&ts->iops_stat[ddir], iops);
- if (td->iops_log)
- add_log_sample(td, td->iops_log, iops, ddir, bs, 0);
+ if (td->iops_log) {
+ unsigned int bs = 0;
+
+ if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
+ bs = td->o.min_bs[ddir];
+
+ next = add_log_sample(td, td->iops_log, iops, ddir, bs, 0);
+ next_log = min(next_log, next);
+ }
td->stat_io_blocks[ddir] = td->this_io_blocks[ddir];
}
- fio_gettime(&td->iops_sample_time, NULL);
+ timeval_add_msec(&td->iops_sample_time, td->o.iops_avg_time);
+
td_io_u_unlock(td);
+
+ if (spent <= td->o.iops_avg_time)
+ return min(next_log, td->o.iops_avg_time);
+
+ next = td->o.iops_avg_time - (1 + spent - td->o.iops_avg_time);
+ return min(next, next_log);
+}
+
+/*
+ * Returns msecs to next event
+ */
+int calc_log_samples(void)
+{
+ struct thread_data *td;
+ unsigned int next = ~0U, tmp;
+ struct timeval now;
+ int i;
+
+ fio_gettime(&now, NULL);
+
+ for_each_td(td, i) {
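+		/* not sampling during ramp/idle, but keep the timer armed */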
+ if (in_ramp_time(td) ||
+ !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
+ next = min(td->o.iops_avg_time, td->o.bw_avg_time);
+ continue;
+ }
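+		/*
+		 * Only windowed (averaged) logs are sampled from here; logs
+		 * updated per IO unit get their samples in the IO path.
+		 */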
+ if (!per_unit_log(td->bw_log)) {
+ tmp = add_bw_samples(td, &now);
+ if (tmp < next)
+ next = tmp;
+ }
+ if (!per_unit_log(td->iops_log)) {
+ tmp = add_iops_samples(td, &now);
+ if (tmp < next)
+ next = tmp;
+ }
+ }
+
+ return next == ~0U ? 0 : next;
}
void stat_init(void)
*/
void show_running_run_stats(void)
{
- helper_do_stat = 1;
- pthread_cond_signal(&helper_cond);
+ helper_do_stat();
}
uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u)