X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=stat.c;h=c41ac899829f622be67bec5ed364d78320b64984;hp=a3bfe632454e4b0a2a2d5c5dcde5a4b429f4e6f5;hb=39c7a2ca0c7b3ffc99a8e1f2917ce9d00f318c82;hpb=0279b88017b5d21f0fcbb1b57481339735c41797 diff --git a/stat.c b/stat.c index a3bfe632..c41ac899 100644 --- a/stat.c +++ b/stat.c @@ -15,6 +15,10 @@ #include "idletime.h" #include "lib/pow2.h" #include "lib/output_buffer.h" +#include "helper_thread.h" +#include "smalloc.h" + +#define LOG_MSEC_SLACK 10 struct fio_mutex *stat_mutex; @@ -1251,6 +1255,64 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts, } } + /* steady state detection; move this behind json+? */ + if (ts->ss) { + struct json_object *data; + struct json_array *iops, *bw; + struct steadystate_data *ss = ts->ss; + unsigned long long sum_iops, sum_bw; + double mean_iops, mean_bw; + int i, j, k; + char ss_buf[64]; + + snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s", + ss->state & __FIO_SS_IOPS ? "iops" : "bw", + ss->state & __FIO_SS_SLOPE ? "_slope" : "", + (float) ss->limit, + ss->pct ? "%" : ""); + + tmp = json_create_object(); + json_object_add_value_object(root, "steadystate", tmp); + json_object_add_value_string(tmp, "ss", ss_buf); + json_object_add_value_int(tmp, "duration", (int)ss->dur); + json_object_add_value_int(tmp, "steadystate_ramptime", ss->ramp_time / 1000000L); + json_object_add_value_int(tmp, "attained", (ss->state & __FIO_SS_ATTAINED) > 0); + + snprintf(ss_buf, sizeof(ss_buf), "%f%s", (float) ss->criterion, ss->pct ? "%" : ""); + json_object_add_value_string(tmp, "criterion", ss_buf); + json_object_add_value_float(tmp, "max_deviation", ss->deviation); + json_object_add_value_float(tmp, "slope", ss->slope); + + data = json_create_object(); + json_object_add_value_object(tmp, "data", data); + bw = json_create_array(); + iops = json_create_array(); + + /* + ** if ss was attained or the buffer is not full, + ** ss->head points to the first element in the list. + ** otherwise it actually points to the second element + ** in the list + */ + if ((ss->state & __FIO_SS_ATTAINED) || ss->sum_y == 0) + j = ss->head; + else + j = ss->head == 0 ? 
ss->dur - 1 : ss->head - 1; + for (i = 0, sum_iops = 0, sum_bw = 0; i < ss->dur; i++) { + k = (j + i) % ss->dur; + sum_bw += ss->bw_data[k]; + sum_iops += ss->iops_data[k]; + json_array_add_value_int(bw, ss->bw_data[k]); + json_array_add_value_int(iops, ss->iops_data[k]); + } + mean_bw = (double) sum_bw / ss->dur; + mean_iops = (double) sum_iops / ss->dur; + json_object_add_value_float(data, "bw_mean", mean_bw); + json_object_add_value_float(data, "iops_mean", mean_iops); + json_object_add_value_array(data, "iops", iops); + json_object_add_value_array(data, "bw", bw); + } + return root; } @@ -1574,12 +1636,19 @@ void __show_run_stats(void) ts->block_infos[k] = td->ts.block_infos[k]; sum_thread_stats(ts, &td->ts, idx == 1); + + if (td->o.ss_dur) + ts->ss = &td->ss; + else + ts->ss = NULL; } for (i = 0; i < nr_ts; i++) { unsigned long long bw; ts = &threadstats[i]; + if (ts->groupid == -1) + continue; rs = &runstats[ts->groupid]; rs->kb_base = ts->kb_base; rs->unit_base = ts->unit_base; @@ -1633,16 +1702,21 @@ void __show_run_stats(void) if (output_format & FIO_OUTPUT_JSON) { struct thread_data *global; char time_buf[32]; - time_t time_p; + struct timeval now; + unsigned long long ms_since_epoch; + + gettimeofday(&now, NULL); + ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 + + (unsigned long long)(now.tv_usec) / 1000; - time(&time_p); - os_ctime_r((const time_t *) &time_p, time_buf, + os_ctime_r((const time_t *) &now.tv_sec, time_buf, sizeof(time_buf)); time_buf[strlen(time_buf) - 1] = '\0'; root = json_create_object(); json_object_add_value_string(root, "fio version", fio_version_string); - json_object_add_value_int(root, "timestamp", time_p); + json_object_add_value_int(root, "timestamp", now.tv_sec); + json_object_add_value_int(root, "timestamp_ms", ms_since_epoch); json_object_add_value_string(root, "time", time_buf); global = get_global_options(); json_add_job_opts(root, "global options", &global->opt_list, false); @@ -1730,19 +1804,19 @@ void __show_running_run_stats(void) fio_gettime(&tv, NULL); for_each_td(td, i) { - rt[i] = mtime_since(&td->start, &tv); - if (td_read(td) && td->io_bytes[DDIR_READ]) - td->ts.runtime[DDIR_READ] += rt[i]; - if (td_write(td) && td->io_bytes[DDIR_WRITE]) - td->ts.runtime[DDIR_WRITE] += rt[i]; - if (td_trim(td) && td->io_bytes[DDIR_TRIM]) - td->ts.runtime[DDIR_TRIM] += rt[i]; - td->update_rusage = 1; td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ]; td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE]; td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM]; td->ts.total_run_time = mtime_since(&td->epoch, &tv); + + rt[i] = mtime_since(&td->start, &tv); + if (td_read(td) && td->ts.io_bytes[DDIR_READ]) + td->ts.runtime[DDIR_READ] += rt[i]; + if (td_write(td) && td->ts.io_bytes[DDIR_WRITE]) + td->ts.runtime[DDIR_WRITE] += rt[i]; + if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM]) + td->ts.runtime[DDIR_TRIM] += rt[i]; } for_each_td(td, i) { @@ -1758,11 +1832,11 @@ void __show_running_run_stats(void) __show_run_stats(); for_each_td(td, i) { - if (td_read(td) && td->io_bytes[DDIR_READ]) + if (td_read(td) && td->ts.io_bytes[DDIR_READ]) td->ts.runtime[DDIR_READ] -= rt[i]; - if (td_write(td) && td->io_bytes[DDIR_WRITE]) + if (td_write(td) && td->ts.io_bytes[DDIR_WRITE]) td->ts.runtime[DDIR_WRITE] -= rt[i]; - if (td_trim(td) && td->io_bytes[DDIR_TRIM]) + if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM]) td->ts.runtime[DDIR_TRIM] -= rt[i]; } @@ -1846,58 +1920,184 @@ static inline void add_stat_sample(struct io_stat *is, unsigned long data) is->samples++; } 
+/* + * Return a struct io_logs, which is added to the tail of the log + * list for 'iolog'. + */ +static struct io_logs *get_new_log(struct io_log *iolog) +{ + size_t new_size, new_samples; + struct io_logs *cur_log; + + /* + * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling + * forever + */ + if (!iolog->cur_log_max) + new_samples = DEF_LOG_ENTRIES; + else { + new_samples = iolog->cur_log_max * 2; + if (new_samples > MAX_LOG_ENTRIES) + new_samples = MAX_LOG_ENTRIES; + } + + new_size = new_samples * log_entry_sz(iolog); + + cur_log = smalloc(sizeof(*cur_log)); + if (cur_log) { + INIT_FLIST_HEAD(&cur_log->list); + cur_log->log = malloc(new_size); + if (cur_log->log) { + cur_log->nr_samples = 0; + cur_log->max_samples = new_samples; + flist_add_tail(&cur_log->list, &iolog->io_logs); + iolog->cur_log_max = new_samples; + return cur_log; + } + sfree(cur_log); + } + + return NULL; +} + +/* + * Add and return a new log chunk, or return current log if big enough + */ +static struct io_logs *regrow_log(struct io_log *iolog) +{ + struct io_logs *cur_log; + int i; + + if (!iolog || iolog->disabled) + goto disable; + + cur_log = iolog_cur_log(iolog); + if (!cur_log) { + cur_log = get_new_log(iolog); + if (!cur_log) + return NULL; + } + + if (cur_log->nr_samples < cur_log->max_samples) + return cur_log; + + /* + * No room for a new sample. If we're compressing on the fly, flush + * out the current chunk + */ + if (iolog->log_gz) { + if (iolog_cur_flush(iolog, cur_log)) { + log_err("fio: failed flushing iolog! Will stop logging.\n"); + return NULL; + } + } + + /* + * Get a new log array, and add to our list + */ + cur_log = get_new_log(iolog); + if (!cur_log) { + log_err("fio: failed extending iolog! Will stop logging.\n"); + return NULL; + } + + if (!iolog->pending || !iolog->pending->nr_samples) + return cur_log; + + /* + * Flush pending items to new log + */ + for (i = 0; i < iolog->pending->nr_samples; i++) { + struct io_sample *src, *dst; + + src = get_sample(iolog, iolog->pending, i); + dst = get_sample(iolog, cur_log, i); + memcpy(dst, src, log_entry_sz(iolog)); + } + cur_log->nr_samples = iolog->pending->nr_samples; + + iolog->pending->nr_samples = 0; + return cur_log; +disable: + if (iolog) + iolog->disabled = true; + return NULL; +} + +void regrow_logs(struct thread_data *td) +{ + regrow_log(td->slat_log); + regrow_log(td->clat_log); + regrow_log(td->clat_hist_log); + regrow_log(td->lat_log); + regrow_log(td->bw_log); + regrow_log(td->iops_log); + td->flags &= ~TD_F_REGROW_LOGS; +} + +static struct io_logs *get_cur_log(struct io_log *iolog) +{ + struct io_logs *cur_log; + + cur_log = iolog_cur_log(iolog); + if (!cur_log) { + cur_log = get_new_log(iolog); + if (!cur_log) + return NULL; + } + + if (cur_log->nr_samples < cur_log->max_samples) + return cur_log; + + /* + * Out of space. If we're in IO offload mode, or we're not doing + * per unit logging (hence logging happens outside of the IO thread + * as well), add a new log chunk inline. If we're doing inline + * submissions, flag 'td' as needing a log regrow and we'll take + * care of it on the submission side. 
+ */ + if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD || + !per_unit_log(iolog)) + return regrow_log(iolog); + + iolog->td->flags |= TD_F_REGROW_LOGS; + assert(iolog->pending->nr_samples < iolog->pending->max_samples); + return iolog->pending; +} + static void __add_log_sample(struct io_log *iolog, unsigned long val, enum fio_ddir ddir, unsigned int bs, unsigned long t, uint64_t offset) { - uint64_t nr_samples = iolog->nr_samples; - struct io_sample *s; + struct io_logs *cur_log; if (iolog->disabled) return; - - if (!iolog->nr_samples) + if (flist_empty(&iolog->io_logs)) iolog->avg_last = t; - if (iolog->nr_samples == iolog->max_samples) { - size_t new_size; - void *new_log; + cur_log = get_cur_log(iolog); + if (cur_log) { + struct io_sample *s; - new_size = 2 * iolog->max_samples * log_entry_sz(iolog); + s = get_sample(iolog, cur_log, cur_log->nr_samples); - if (iolog->log_gz && (new_size > iolog->log_gz)) { - if (iolog_flush(iolog, 0)) { - log_err("fio: failed flushing iolog! Will stop logging.\n"); - iolog->disabled = 1; - return; - } - nr_samples = iolog->nr_samples; - } else { - new_log = realloc(iolog->log, new_size); - if (!new_log) { - log_err("fio: failed extending iolog! Will stop logging.\n"); - iolog->disabled = 1; - return; - } - iolog->log = new_log; - iolog->max_samples <<= 1; - } - } - - s = get_sample(iolog, nr_samples); + s->val = val; + s->time = t; + io_sample_set_ddir(iolog, s, ddir); + s->bs = bs; - s->val = val; - s->time = t; - io_sample_set_ddir(iolog, s, ddir); - s->bs = bs; + if (iolog->log_offset) { + struct io_sample_offset *so = (void *) s; - if (iolog->log_offset) { - struct io_sample_offset *so = (void *) s; + so->offset = offset; + } - so->offset = offset; + cur_log->nr_samples++; + return; } - iolog->nr_samples++; + iolog->disabled = true; } static inline void reset_io_stat(struct io_stat *ios) @@ -1942,45 +2142,45 @@ void reset_io_stats(struct thread_data *td) } } -static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed) +static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir, + unsigned long elapsed, bool log_max) { /* * Note an entry in the log. Use the mean from the logged samples, * making sure to properly round up. Only write a log entry if we * had actual samples done. 
*/ - if (iolog->avg_window[DDIR_READ].samples) { - unsigned long mr; + if (iolog->avg_window[ddir].samples) { + unsigned long val; - mr = iolog->avg_window[DDIR_READ].mean.u.f + 0.50; - __add_log_sample(iolog, mr, DDIR_READ, 0, elapsed, 0); - } - if (iolog->avg_window[DDIR_WRITE].samples) { - unsigned long mw; + if (log_max) + val = iolog->avg_window[ddir].max_val; + else + val = iolog->avg_window[ddir].mean.u.f + 0.50; - mw = iolog->avg_window[DDIR_WRITE].mean.u.f + 0.50; - __add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed, 0); + __add_log_sample(iolog, val, ddir, 0, elapsed, 0); } - if (iolog->avg_window[DDIR_TRIM].samples) { - unsigned long mw; - mw = iolog->avg_window[DDIR_TRIM].mean.u.f + 0.50; - __add_log_sample(iolog, mw, DDIR_TRIM, 0, elapsed, 0); - } + reset_io_stat(&iolog->avg_window[ddir]); +} + +static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed, + bool log_max) +{ + int ddir; - reset_io_stat(&iolog->avg_window[DDIR_READ]); - reset_io_stat(&iolog->avg_window[DDIR_WRITE]); - reset_io_stat(&iolog->avg_window[DDIR_TRIM]); + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) + __add_stat_to_log(iolog, ddir, elapsed, log_max); } -static void add_log_sample(struct thread_data *td, struct io_log *iolog, +static long add_log_sample(struct thread_data *td, struct io_log *iolog, unsigned long val, enum fio_ddir ddir, unsigned int bs, uint64_t offset) { unsigned long elapsed, this_window; if (!ddir_rw(ddir)) - return; + return 0; elapsed = mtime_since_now(&td->epoch); @@ -1989,7 +2189,7 @@ static void add_log_sample(struct thread_data *td, struct io_log *iolog, */ if (!iolog->avg_msec) { __add_log_sample(iolog, val, ddir, bs, elapsed, offset); - return; + return 0; } /* @@ -2003,30 +2203,37 @@ static void add_log_sample(struct thread_data *td, struct io_log *iolog, * need to do. 
*/ this_window = elapsed - iolog->avg_last; - if (this_window < iolog->avg_msec) - return; + if (elapsed < iolog->avg_last) + return iolog->avg_last - elapsed; + else if (this_window < iolog->avg_msec) { + int diff = iolog->avg_msec - this_window; - _add_stat_to_log(iolog, elapsed); + if (inline_log(iolog) || diff > LOG_MSEC_SLACK) + return diff; + } - iolog->avg_last = elapsed; + _add_stat_to_log(iolog, elapsed, td->o.log_max != 0); + + iolog->avg_last = elapsed - (this_window - iolog->avg_msec); + return iolog->avg_msec; } -void finalize_logs(struct thread_data *td) +void finalize_logs(struct thread_data *td, bool unit_logs) { unsigned long elapsed; elapsed = mtime_since_now(&td->epoch); - if (td->clat_log) - _add_stat_to_log(td->clat_log, elapsed); - if (td->slat_log) - _add_stat_to_log(td->slat_log, elapsed); - if (td->lat_log) - _add_stat_to_log(td->lat_log, elapsed); - if (td->bw_log) - _add_stat_to_log(td->bw_log, elapsed); - if (td->iops_log) - _add_stat_to_log(td->iops_log, elapsed); + if (td->clat_log && unit_logs) + _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0); + if (td->slat_log && unit_logs) + _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0); + if (td->lat_log && unit_logs) + _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0); + if (td->bw_log && (unit_logs == per_unit_log(td->bw_log))) + _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0); + if (td->iops_log && (unit_logs == per_unit_log(td->iops_log))) + _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0); } void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs) @@ -2052,10 +2259,9 @@ static void add_clat_percentile_sample(struct thread_stat *ts, void add_clat_sample(struct thread_data *td, enum fio_ddir ddir, unsigned long usec, unsigned int bs, uint64_t offset) { + unsigned long elapsed, this_window; struct thread_stat *ts = &td->ts; - - if (!ddir_rw(ddir)) - return; + struct io_log *iolog = td->clat_hist_log; td_io_u_lock(td); @@ -2067,6 +2273,43 @@ void add_clat_sample(struct thread_data *td, enum fio_ddir ddir, if (ts->clat_percentiles) add_clat_percentile_sample(ts, usec, ddir); + if (iolog && iolog->hist_msec) { + struct io_hist *hw = &iolog->hist_window[ddir]; + + hw->samples++; + elapsed = mtime_since_now(&td->epoch); + if (!hw->hist_last) + hw->hist_last = elapsed; + this_window = elapsed - hw->hist_last; + + if (this_window >= iolog->hist_msec) { + unsigned int *io_u_plat; + unsigned int *dst; + + /* + * Make a byte-for-byte copy of the latency histogram + * stored in td->ts.io_u_plat[ddir], recording it in a + * log sample. Note that the matching call to free() is + * located in iolog.c after printing this sample to the + * log file. + */ + io_u_plat = (unsigned int *) td->ts.io_u_plat[ddir]; + dst = malloc(FIO_IO_U_PLAT_NR * sizeof(unsigned int)); + memcpy(dst, io_u_plat, + FIO_IO_U_PLAT_NR * sizeof(unsigned int)); + __add_log_sample(iolog, (unsigned long )dst, ddir, bs, + elapsed, offset); + + /* + * Update the last time we recorded as being now, minus + * any drift in time we encountered before actually + * making the record. 
+ */ + hw->hist_last = elapsed - (this_window - iolog->hist_msec); + hw->samples = 0; + } + } + td_io_u_unlock(td); } @@ -2106,18 +2349,41 @@ void add_lat_sample(struct thread_data *td, enum fio_ddir ddir, td_io_u_unlock(td); } -void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs, - struct timeval *t) +void add_bw_sample(struct thread_data *td, struct io_u *io_u, + unsigned int bytes, unsigned long spent) +{ + struct thread_stat *ts = &td->ts; + unsigned long rate; + + if (spent) + rate = bytes * 1000 / spent; + else + rate = 0; + + td_io_u_lock(td); + + add_stat_sample(&ts->bw_stat[io_u->ddir], rate); + + if (td->bw_log) + add_log_sample(td, td->bw_log, rate, io_u->ddir, bytes, io_u->offset); + + td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir]; + td_io_u_unlock(td); +} + +static int add_bw_samples(struct thread_data *td, struct timeval *t) { struct thread_stat *ts = &td->ts; unsigned long spent, rate; + enum fio_ddir ddir; + unsigned int next, next_log; - if (!ddir_rw(ddir)) - return; + next_log = td->o.bw_avg_time; spent = mtime_since(&td->bw_sample_time, t); - if (spent < td->o.bw_avg_time) - return; + if (spent < td->o.bw_avg_time && + td->o.bw_avg_time - spent >= LOG_MSEC_SLACK) + return td->o.bw_avg_time - spent; td_io_u_lock(td); @@ -2138,28 +2404,59 @@ void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs, add_stat_sample(&ts->bw_stat[ddir], rate); - if (td->bw_log) - add_log_sample(td, td->bw_log, rate, ddir, bs, 0); + if (td->bw_log) { + unsigned int bs = 0; + + if (td->o.min_bs[ddir] == td->o.max_bs[ddir]) + bs = td->o.min_bs[ddir]; + + next = add_log_sample(td, td->bw_log, rate, ddir, bs, 0); + next_log = min(next_log, next); + } td->stat_io_bytes[ddir] = td->this_io_bytes[ddir]; } - fio_gettime(&td->bw_sample_time, NULL); + timeval_add_msec(&td->bw_sample_time, td->o.bw_avg_time); + td_io_u_unlock(td); + + if (spent <= td->o.bw_avg_time) + return min(next_log, td->o.bw_avg_time); + + next = td->o.bw_avg_time - (1 + spent - td->o.bw_avg_time); + return min(next, next_log); } -void add_iops_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs, - struct timeval *t) +void add_iops_sample(struct thread_data *td, struct io_u *io_u, + unsigned int bytes) +{ + struct thread_stat *ts = &td->ts; + + td_io_u_lock(td); + + add_stat_sample(&ts->iops_stat[io_u->ddir], 1); + + if (td->iops_log) + add_log_sample(td, td->iops_log, 1, io_u->ddir, bytes, io_u->offset); + + td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir]; + td_io_u_unlock(td); +} + +static int add_iops_samples(struct thread_data *td, struct timeval *t) { struct thread_stat *ts = &td->ts; unsigned long spent, iops; + enum fio_ddir ddir; + unsigned int next, next_log; - if (!ddir_rw(ddir)) - return; + next_log = td->o.iops_avg_time; spent = mtime_since(&td->iops_sample_time, t); - if (spent < td->o.iops_avg_time) - return; + if (spent < td->o.iops_avg_time && + td->o.iops_avg_time - spent >= LOG_MSEC_SLACK) + return td->o.iops_avg_time - spent; td_io_u_lock(td); @@ -2180,14 +2477,61 @@ void add_iops_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs add_stat_sample(&ts->iops_stat[ddir], iops); - if (td->iops_log) - add_log_sample(td, td->iops_log, iops, ddir, bs, 0); + if (td->iops_log) { + unsigned int bs = 0; + + if (td->o.min_bs[ddir] == td->o.max_bs[ddir]) + bs = td->o.min_bs[ddir]; + + next = add_log_sample(td, td->iops_log, iops, ddir, bs, 0); + next_log = min(next_log, next); + } td->stat_io_blocks[ddir] = 
td->this_io_blocks[ddir]; } - fio_gettime(&td->iops_sample_time, NULL); + timeval_add_msec(&td->iops_sample_time, td->o.iops_avg_time); + td_io_u_unlock(td); + + if (spent <= td->o.iops_avg_time) + return min(next_log, td->o.iops_avg_time); + + next = td->o.iops_avg_time - (1 + spent - td->o.iops_avg_time); + return min(next, next_log); +} + +/* + * Returns msecs to next event + */ +int calc_log_samples(void) +{ + struct thread_data *td; + unsigned int next = ~0U, tmp; + struct timeval now; + int i; + + fio_gettime(&now, NULL); + + for_each_td(td, i) { + if (in_ramp_time(td) || + !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) { + next = min(td->o.iops_avg_time, td->o.bw_avg_time); + continue; + } + if (!per_unit_log(td->bw_log)) { + tmp = add_bw_samples(td, &now); + if (tmp < next) + next = tmp; + } + if (!per_unit_log(td->iops_log)) { + tmp = add_iops_samples(td, &now); + if (tmp < next) + next = tmp; + } + } + + return next == ~0U ? 0 : next; } void stat_init(void) @@ -2210,8 +2554,7 @@ void stat_exit(void) */ void show_running_run_stats(void) { - helper_do_stat = 1; - pthread_cond_signal(&helper_cond); + helper_do_stat(); } uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u)
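
The steady-state hunk above walks a circular buffer of per-interval bw/iops samples, starting at the oldest entry and wrapping with modulo arithmetic before computing the means that go into the JSON "data" object. A minimal standalone sketch of that ring-buffer walk, with plain arrays standing in for fio's struct steadystate_data and an illustrative function name:

/*
 * Sketch of the circular-buffer walk used in the steady-state JSON hunk:
 * start from the oldest sample ("head") and accumulate dur entries,
 * wrapping around with modulo arithmetic.
 */
void ss_means(const unsigned long *bw_data, const unsigned long *iops_data,
	      unsigned int dur, unsigned int head,
	      double *mean_bw, double *mean_iops)
{
	unsigned long long sum_bw = 0, sum_iops = 0;
	unsigned int i, k;

	for (i = 0; i < dur; i++) {
		k = (head + i) % dur;	/* wrap around the circular buffer */
		sum_bw += bw_data[k];
		sum_iops += iops_data[k];
	}

	*mean_bw = (double) sum_bw / dur;
	*mean_iops = (double) sum_iops / dur;
}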
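
The JSON output change derives both the second-resolution "timestamp" and the new millisecond "timestamp_ms" field from a single gettimeofday() call, instead of the old time()/os_ctime_r() pair. A standalone sketch of the same arithmetic, outside of fio:

/* Standalone sketch of the timestamp/timestamp_ms calculation above. */
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timeval now;
	unsigned long long ms_since_epoch;

	gettimeofday(&now, NULL);
	/* whole seconds scaled to ms, plus the sub-second part truncated to ms */
	ms_since_epoch = (unsigned long long) now.tv_sec * 1000 +
			 (unsigned long long) now.tv_usec / 1000;

	printf("timestamp: %lld\n", (long long) now.tv_sec);
	printf("timestamp_ms: %llu\n", ms_since_epoch);
	return 0;
}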
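
get_new_log() replaces the old realloc()-based growth with a list of log chunks: the first chunk holds DEF_LOG_ENTRIES samples and each subsequent chunk doubles the previous size until MAX_LOG_ENTRIES is reached, so the allocation never keeps doubling forever. A sketch of just that sizing policy; the constant values below are placeholders, fio defines the real ones in its iolog header:

/*
 * Sketch of the chunk-size policy in get_new_log(): start at
 * DEF_LOG_ENTRIES and double per new chunk, capped at MAX_LOG_ENTRIES.
 */
#include <stddef.h>

#define DEF_LOG_ENTRIES	1024		/* placeholder value */
#define MAX_LOG_ENTRIES	(1024 * 1024)	/* placeholder value */

size_t next_chunk_entries(size_t cur_log_max)
{
	size_t new_samples;

	if (!cur_log_max)
		return DEF_LOG_ENTRIES;

	new_samples = cur_log_max * 2;
	if (new_samples > MAX_LOG_ENTRIES)
		new_samples = MAX_LOG_ENTRIES;

	return new_samples;
}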
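
With this change add_log_sample(), add_bw_samples() and add_iops_samples() report how many milliseconds remain until their averaging window closes, and calc_log_samples() takes the minimum across all thread data so the helper thread can sleep for exactly that long. A window is flushed before it is strictly due only when it is within LOG_MSEC_SLACK of expiring and the log is not written inline from the IO path. A simplified sketch of that scheduling decision, under the same assumptions; function name and parameters are illustrative, not fio's API:

/*
 * Sketch of the averaging-window schedule used by add_log_sample():
 * return 0 when the window should be flushed now, otherwise the number
 * of msecs the caller can wait before trying again.
 */
#define LOG_MSEC_SLACK	10

long msec_to_next_flush(unsigned long elapsed, unsigned long avg_last,
			unsigned long avg_msec, int inline_log)
{
	unsigned long this_window;

	if (elapsed < avg_last)		/* window start is still in the future */
		return avg_last - elapsed;

	this_window = elapsed - avg_last;
	if (this_window < avg_msec) {
		long diff = avg_msec - this_window;

		/* inline logging can't wait; otherwise tolerate a small slack */
		if (inline_log || diff > LOG_MSEC_SLACK)
			return diff;
	}

	return 0;			/* due: caller flushes the window stats */
}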