X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=stat.c;h=0bb21d0093ac05a4e71cda24cea5cfde25ee916b;hp=ca066175d94243f51d7bd2773f0de1b4982c1fa1;hb=dcf9844e850e01d7c4db59960bbc4450a8cbf7ef;hpb=c5103619279883ee9291ed4793bb6ad39b436101 diff --git a/stat.c b/stat.c index ca066175..0bb21d00 100644 --- a/stat.c +++ b/stat.c @@ -15,6 +15,10 @@ #include "idletime.h" #include "lib/pow2.h" #include "lib/output_buffer.h" +#include "helper_thread.h" +#include "smalloc.h" + +#define LOG_MSEC_SLACK 10 struct fio_mutex *stat_mutex; @@ -253,13 +257,13 @@ out: free(ovals); } -int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max, - double *mean, double *dev) +bool calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max, + double *mean, double *dev) { double n = (double) is->samples; if (n == 0) - return 0; + return false; *min = is->min_val; *max = is->max_val; @@ -270,12 +274,13 @@ int calc_lat(struct io_stat *is, unsigned long *min, unsigned long *max, else *dev = 0; - return 1; + return true; } void show_group_stats(struct group_run_stats *rs, struct buf_output *out) { - char *p1, *p2, *p3, *p4; + char *io, *agg, *min, *max; + char *ioalt, *aggalt, *minalt, *maxalt; const char *str[] = { " READ", " WRITE" , " TRIM"}; int i; @@ -287,22 +292,28 @@ void show_group_stats(struct group_run_stats *rs, struct buf_output *out) if (!rs->max_run[i]) continue; - p1 = num2str(rs->io_kb[i], 6, rs->kb_base, i2p, 8); - p2 = num2str(rs->agg[i], 6, rs->kb_base, i2p, rs->unit_base); - p3 = num2str(rs->min_bw[i], 6, rs->kb_base, i2p, rs->unit_base); - p4 = num2str(rs->max_bw[i], 6, rs->kb_base, i2p, rs->unit_base); - - log_buf(out, "%s: io=%s, aggrb=%s/s, minb=%s/s, maxb=%s/s," - " mint=%llumsec, maxt=%llumsec\n", + io = num2str(rs->iobytes[i], 4, 1, i2p, N2S_BYTE); + ioalt = num2str(rs->iobytes[i], 4, 1, !i2p, N2S_BYTE); + agg = num2str(rs->agg[i], 4, 1, i2p, rs->unit_base); + aggalt = num2str(rs->agg[i], 4, 1, !i2p, rs->unit_base); + min = num2str(rs->min_bw[i], 4, 1, i2p, rs->unit_base); + minalt = num2str(rs->min_bw[i], 4, 1, !i2p, rs->unit_base); + max = num2str(rs->max_bw[i], 4, 1, i2p, rs->unit_base); + maxalt = num2str(rs->max_bw[i], 4, 1, !i2p, rs->unit_base); + log_buf(out, "%s: bw=%s (%s), %s-%s (%s-%s), io=%s (%s), run=%llu-%llumsec\n", rs->unified_rw_rep ? 
" MIXED" : str[i], - p1, p2, p3, p4, + agg, aggalt, min, max, minalt, maxalt, io, ioalt, (unsigned long long) rs->min_run[i], (unsigned long long) rs->max_run[i]); - free(p1); - free(p2); - free(p3); - free(p4); + free(io); + free(agg); + free(min); + free(max); + free(ioalt); + free(aggalt); + free(minalt); + free(maxalt); } } @@ -360,11 +371,11 @@ static void display_lat(const char *name, unsigned long min, unsigned long max, const char *base = "(usec)"; char *minp, *maxp; - if (!usec_to_msec(&min, &max, &mean, &dev)) + if (usec_to_msec(&min, &max, &mean, &dev)) base = "(msec)"; - minp = num2str(min, 6, 1, 0, 0); - maxp = num2str(max, 6, 1, 0, 0); + minp = num2str(min, 6, 1, 0, N2S_NONE); + maxp = num2str(max, 6, 1, 0, N2S_NONE); log_buf(out, " %s %s: min=%s, max=%s, avg=%5.02f," " stdev=%5.02f\n", name, base, minp, maxp, mean, dev); @@ -376,11 +387,11 @@ static void display_lat(const char *name, unsigned long min, unsigned long max, static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts, int ddir, struct buf_output *out) { - const char *str[] = { "read ", "write", "trim" }; + const char *str[] = { " read", "write", " trim" }; unsigned long min, max, runt; unsigned long long bw, iops; double mean, dev; - char *io_p, *bw_p, *iops_p; + char *io_p, *bw_p, *bw_p_alt, *iops_p; int i2p; assert(ddir_rw(ddir)); @@ -392,19 +403,21 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts, runt = ts->runtime[ddir]; bw = (1000 * ts->io_bytes[ddir]) / runt; - io_p = num2str(ts->io_bytes[ddir], 6, 1, i2p, 8); - bw_p = num2str(bw, 6, 1, i2p, ts->unit_base); + io_p = num2str(ts->io_bytes[ddir], 4, 1, i2p, N2S_BYTE); + bw_p = num2str(bw, 4, 1, i2p, ts->unit_base); + bw_p_alt = num2str(bw, 4, 1, !i2p, ts->unit_base); iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt; - iops_p = num2str(iops, 6, 1, 0, 0); + iops_p = num2str(iops, 4, 1, 0, N2S_NONE); - log_buf(out, " %s: io=%s, bw=%s/s, iops=%s, runt=%6llumsec\n", - rs->unified_rw_rep ? "mixed" : str[ddir], - io_p, bw_p, iops_p, - (unsigned long long) ts->runtime[ddir]); + log_buf(out, " %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)\n", + rs->unified_rw_rep ? "mixed" : str[ddir], + iops_p, bw_p, bw_p_alt, io_p, + (unsigned long long) ts->runtime[ddir]); free(io_p); free(bw_p); + free(bw_p_alt); free(iops_p); if (calc_lat(&ts->slat_stat[ddir], &min, &max, &mean, &dev)) @@ -422,7 +435,16 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts, } if (calc_lat(&ts->bw_stat[ddir], &min, &max, &mean, &dev)) { double p_of_agg = 100.0, fkb_base = (double)rs->kb_base; - const char *bw_str = (rs->unit_base == 1 ? "Kbit" : "KB"); + const char *bw_str; + + if ((rs->unit_base == 1) && i2p) + bw_str = "Kibit"; + else if (rs->unit_base == 1) + bw_str = "kbit"; + else if (i2p) + bw_str = "KiB"; + else + bw_str = "kB"; if (rs->unit_base == 1) { min *= 8.0; @@ -442,12 +464,11 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts, max /= fkb_base; mean /= fkb_base; dev /= fkb_base; - bw_str = (rs->unit_base == 1 ? "Mbit" : "MB"); + bw_str = (rs->unit_base == 1 ? "Mibit" : "MiB"); } - log_buf(out, " bw (%-4s/s): min=%5lu, max=%5lu, per=%3.2f%%," - " avg=%5.02f, stdev=%5.02f\n", bw_str, min, max, - p_of_agg, mean, dev); + log_buf(out, " bw (%5s/s): min=%5lu, max=%5lu, per=%3.2f%%, avg=%5.02f, stdev=%5.02f\n", + bw_str, min, max, p_of_agg, mean, dev); } } @@ -653,6 +674,35 @@ static void show_block_infos(int nr_block_infos, uint32_t *block_infos, i == BLOCK_STATE_COUNT - 1 ? 
'\n' : ','); } +static void show_ss_normal(struct thread_stat *ts, struct buf_output *out) +{ + char *p1, *p1alt, *p2; + unsigned long long bw_mean, iops_mean; + const int i2p = is_power_of_2(ts->kb_base); + + if (!ts->ss_dur) + return; + + bw_mean = steadystate_bw_mean(ts); + iops_mean = steadystate_iops_mean(ts); + + p1 = num2str(bw_mean / ts->kb_base, 4, ts->kb_base, i2p, ts->unit_base); + p1alt = num2str(bw_mean / ts->kb_base, 4, ts->kb_base, !i2p, ts->unit_base); + p2 = num2str(iops_mean, 4, 1, 0, N2S_NONE); + + log_buf(out, " steadystate : attained=%s, bw=%s (%s), iops=%s, %s%s=%.3f%s\n", + ts->ss_state & __FIO_SS_ATTAINED ? "yes" : "no", + p1, p1alt, p2, + ts->ss_state & __FIO_SS_IOPS ? "iops" : "bw", + ts->ss_state & __FIO_SS_SLOPE ? " slope": " mean dev", + ts->ss_criterion.u.f, + ts->ss_state & __FIO_SS_PCT ? "%" : ""); + + free(p1); + free(p1alt); + free(p2); +} + static void show_thread_status_normal(struct thread_stat *ts, struct group_run_stats *rs, struct buf_output *out) @@ -665,6 +715,8 @@ static void show_thread_status_normal(struct thread_stat *ts, if (!ddir_rw_sum(ts->io_bytes) && !ddir_rw_sum(ts->total_io_u)) return; + + memset(time_buf, 0, sizeof(time_buf)); time(&time_p); os_ctime_r((const time_t *) &time_p, time_buf, sizeof(time_buf)); @@ -728,9 +780,9 @@ static void show_thread_status_normal(struct thread_stat *ts, io_u_dist[1], io_u_dist[2], io_u_dist[3], io_u_dist[4], io_u_dist[5], io_u_dist[6]); - log_buf(out, " issued : total=r=%llu/w=%llu/d=%llu," - " short=r=%llu/w=%llu/d=%llu," - " drop=r=%llu/w=%llu/d=%llu\n", + log_buf(out, " issued rwt: total=%llu,%llu,%llu," + " short=%llu,%llu,%llu," + " dropped=%llu,%llu,%llu\n", (unsigned long long) ts->total_io_u[0], (unsigned long long) ts->total_io_u[1], (unsigned long long) ts->total_io_u[2], @@ -757,6 +809,9 @@ static void show_thread_status_normal(struct thread_stat *ts, if (ts->nr_block_infos) show_block_infos(ts->nr_block_infos, ts->block_infos, ts->percentile_list, out); + + if (ts->ss_dur) + show_ss_normal(ts, out); } static void show_ddir_status_terse(struct thread_stat *ts, @@ -776,7 +831,7 @@ static void show_ddir_status_terse(struct thread_stat *ts, if (ts->runtime[ddir]) { uint64_t runt = ts->runtime[ddir]; - bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; + bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; /* KiB/s */ iops = (1000 * (uint64_t) ts->total_io_u[ddir]) / runt; } @@ -860,7 +915,7 @@ static void add_ddir_status_json(struct thread_stat *ts, if (ts->runtime[ddir]) { uint64_t runt = ts->runtime[ddir]; - bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; + bw = ((1000 * ts->io_bytes[ddir]) / runt) / 1024; /* KiB/s */ iops = (1000.0 * (uint64_t) ts->total_io_u[ddir]) / runt; } @@ -1086,8 +1141,34 @@ static void show_thread_status_terse_v3_v4(struct thread_stat *ts, log_buf(out, "\n"); } +static void json_add_job_opts(struct json_object *root, const char *name, + struct flist_head *opt_list, bool num_jobs) +{ + struct json_object *dir_object; + struct flist_head *entry; + struct print_option *p; + + if (flist_empty(opt_list)) + return; + + dir_object = json_create_object(); + json_object_add_value_object(root, name, dir_object); + + flist_for_each(entry, opt_list) { + const char *pos = ""; + + p = flist_entry(entry, struct print_option, list); + if (!num_jobs && !strcmp(p->name, "numjobs")) + continue; + if (p->value) + pos = p->value; + json_object_add_value_string(dir_object, p->name, pos); + } +} + static struct json_object *show_thread_status_json(struct thread_stat *ts, - struct 
group_run_stats *rs) + struct group_run_stats *rs, + struct flist_head *opt_list) { struct json_object *root, *tmp; struct jobs_eta *je; @@ -1110,6 +1191,9 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts, json_object_add_value_int(root, "elapsed", je->elapsed_sec); } + if (opt_list) + json_add_job_opts(root, "job options", opt_list, true); + add_ddir_status_json(ts, rs, DDIR_READ, root); add_ddir_status_json(ts, rs, DDIR_WRITE, root); add_ddir_status_json(ts, rs, DDIR_TRIM, root); @@ -1222,6 +1306,56 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts, } } + if (ts->ss_dur) { + struct json_object *data; + struct json_array *iops, *bw; + int i, j, k; + char ss_buf[64]; + + snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s", + ts->ss_state & __FIO_SS_IOPS ? "iops" : "bw", + ts->ss_state & __FIO_SS_SLOPE ? "_slope" : "", + (float) ts->ss_limit.u.f, + ts->ss_state & __FIO_SS_PCT ? "%" : ""); + + tmp = json_create_object(); + json_object_add_value_object(root, "steadystate", tmp); + json_object_add_value_string(tmp, "ss", ss_buf); + json_object_add_value_int(tmp, "duration", (int)ts->ss_dur); + json_object_add_value_int(tmp, "attained", (ts->ss_state & __FIO_SS_ATTAINED) > 0); + + snprintf(ss_buf, sizeof(ss_buf), "%f%s", (float) ts->ss_criterion.u.f, + ts->ss_state & __FIO_SS_PCT ? "%" : ""); + json_object_add_value_string(tmp, "criterion", ss_buf); + json_object_add_value_float(tmp, "max_deviation", ts->ss_deviation.u.f); + json_object_add_value_float(tmp, "slope", ts->ss_slope.u.f); + + data = json_create_object(); + json_object_add_value_object(tmp, "data", data); + bw = json_create_array(); + iops = json_create_array(); + + /* + ** if ss was attained or the buffer is not full, + ** ss->head points to the first element in the list. + ** otherwise it actually points to the second element + ** in the list + */ + if ((ts->ss_state & __FIO_SS_ATTAINED) || !(ts->ss_state & __FIO_SS_BUFFER_FULL)) + j = ts->ss_head; + else + j = ts->ss_head == 0 ? 
ts->ss_dur - 1 : ts->ss_head - 1; + for (i = 0; i < ts->ss_dur; i++) { + k = (j + i) % ts->ss_dur; + json_array_add_value_int(bw, ts->ss_bw_data[k]); + json_array_add_value_int(iops, ts->ss_iops_data[k]); + } + json_object_add_value_int(data, "bw_mean", steadystate_bw_mean(ts)); + json_object_add_value_int(data, "iops_mean", steadystate_iops_mean(ts)); + json_object_add_value_array(data, "iops", iops); + json_object_add_value_array(data, "bw", bw); + } + return root; } @@ -1239,6 +1373,7 @@ static void show_thread_status_terse(struct thread_stat *ts, struct json_object *show_thread_status(struct thread_stat *ts, struct group_run_stats *rs, + struct flist_head *opt_list, struct buf_output *out) { struct json_object *ret = NULL; @@ -1246,7 +1381,7 @@ struct json_object *show_thread_status(struct thread_stat *ts, if (output_format & FIO_OUTPUT_TERSE) show_thread_status_terse(ts, rs, out); if (output_format & FIO_OUTPUT_JSON) - ret = show_thread_status_json(ts, rs); + ret = show_thread_status_json(ts, rs, opt_list); if (output_format & FIO_OUTPUT_NORMAL) show_thread_status_normal(ts, rs, out); @@ -1302,7 +1437,7 @@ void sum_group_stats(struct group_run_stats *dst, struct group_run_stats *src) if (dst->min_bw[i] && dst->min_bw[i] > src->min_bw[i]) dst->min_bw[i] = src->min_bw[i]; - dst->io_kb[i] += src->io_kb[i]; + dst->iobytes[i] += src->iobytes[i]; dst->agg[i] += src->agg[i]; } @@ -1427,6 +1562,7 @@ void __show_run_stats(void) struct json_object *root = NULL; struct json_array *array = NULL; struct buf_output output[FIO_OUTPUT_NR]; + struct flist_head **opt_lists; runstats = malloc(sizeof(struct group_run_stats) * (groupid + 1)); @@ -1452,9 +1588,12 @@ void __show_run_stats(void) } threadstats = malloc(nr_ts * sizeof(struct thread_stat)); + opt_lists = malloc(nr_ts * sizeof(struct flist_head *)); - for (i = 0; i < nr_ts; i++) + for (i = 0; i < nr_ts; i++) { init_thread_stat(&threadstats[i]); + opt_lists[i] = NULL; + } j = 0; last_ts = -1; @@ -1473,6 +1612,7 @@ void __show_run_stats(void) ts->clat_percentiles = td->o.clat_percentiles; ts->percentile_precision = td->o.percentile_precision; memcpy(ts->percentile_list, td->o.percentile_list, sizeof(td->o.percentile_list)); + opt_lists[j] = &td->opt_list; idx++; ts->members++; @@ -1539,12 +1679,28 @@ void __show_run_stats(void) ts->block_infos[k] = td->ts.block_infos[k]; sum_thread_stats(ts, &td->ts, idx == 1); + + if (td->o.ss_dur) { + ts->ss_state = td->ss.state; + ts->ss_dur = td->ss.dur; + ts->ss_head = td->ss.head; + ts->ss_bw_data = td->ss.bw_data; + ts->ss_iops_data = td->ss.iops_data; + ts->ss_limit.u.f = td->ss.limit; + ts->ss_slope.u.f = td->ss.slope; + ts->ss_deviation.u.f = td->ss.deviation; + ts->ss_criterion.u.f = td->ss.criterion; + } + else + ts->ss_dur = ts->ss_state = 0; } for (i = 0; i < nr_ts; i++) { unsigned long long bw; ts = &threadstats[i]; + if (ts->groupid == -1) + continue; rs = &runstats[ts->groupid]; rs->kb_base = ts->kb_base; rs->unit_base = ts->unit_base; @@ -1559,19 +1715,14 @@ void __show_run_stats(void) rs->max_run[j] = ts->runtime[j]; bw = 0; - if (ts->runtime[j]) { - unsigned long runt = ts->runtime[j]; - unsigned long long kb; - - kb = ts->io_bytes[j] / rs->kb_base; - bw = kb * 1000 / runt; - } + if (ts->runtime[j]) + bw = ts->io_bytes[j] * 1000 / ts->runtime[j]; if (bw < rs->min_bw[j]) rs->min_bw[j] = bw; if (bw > rs->max_bw[j]) rs->max_bw[j] = bw; - rs->io_kb[j] += ts->io_bytes[j] / rs->kb_base; + rs->iobytes[j] += ts->io_bytes[j]; } } @@ -1582,7 +1733,7 @@ void __show_run_stats(void) for (ddir = 0; ddir < 
DDIR_RWDIR_CNT; ddir++) { if (rs->max_run[ddir]) - rs->agg[ddir] = (rs->io_kb[ddir] * 1000) / + rs->agg[ddir] = (rs->iobytes[ddir] * 1000) / rs->max_run[ddir]; } } @@ -1596,33 +1747,46 @@ void __show_run_stats(void) if (output_format & FIO_OUTPUT_NORMAL) log_buf(&output[__FIO_OUTPUT_NORMAL], "\n"); if (output_format & FIO_OUTPUT_JSON) { + struct thread_data *global; char time_buf[32]; - time_t time_p; + struct timeval now; + unsigned long long ms_since_epoch; - time(&time_p); - os_ctime_r((const time_t *) &time_p, time_buf, + gettimeofday(&now, NULL); + ms_since_epoch = (unsigned long long)(now.tv_sec) * 1000 + + (unsigned long long)(now.tv_usec) / 1000; + + os_ctime_r((const time_t *) &now.tv_sec, time_buf, sizeof(time_buf)); - time_buf[strlen(time_buf) - 1] = '\0'; + if (time_buf[strlen(time_buf) - 1] == '\n') + time_buf[strlen(time_buf) - 1] = '\0'; root = json_create_object(); json_object_add_value_string(root, "fio version", fio_version_string); - json_object_add_value_int(root, "timestamp", time_p); + json_object_add_value_int(root, "timestamp", now.tv_sec); + json_object_add_value_int(root, "timestamp_ms", ms_since_epoch); json_object_add_value_string(root, "time", time_buf); + global = get_global_options(); + json_add_job_opts(root, "global options", &global->opt_list, false); array = json_create_array(); json_object_add_value_array(root, "jobs", array); } + if (is_backend) + fio_server_send_job_options(&get_global_options()->opt_list, -1U); + for (i = 0; i < nr_ts; i++) { ts = &threadstats[i]; rs = &runstats[ts->groupid]; - if (is_backend) + if (is_backend) { + fio_server_send_job_options(opt_lists[i], i); fio_server_send_ts(ts, rs); - else { + } else { if (output_format & FIO_OUTPUT_TERSE) show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]); if (output_format & FIO_OUTPUT_JSON) { - struct json_object *tmp = show_thread_status_json(ts, rs); + struct json_object *tmp = show_thread_status_json(ts, rs, opt_lists[i]); json_array_add_value_object(array, tmp); } if (output_format & FIO_OUTPUT_NORMAL) @@ -1665,6 +1829,7 @@ void __show_run_stats(void) log_info_flush(); free(runstats); free(threadstats); + free(opt_lists); } void show_run_stats(void) @@ -1687,19 +1852,19 @@ void __show_running_run_stats(void) fio_gettime(&tv, NULL); for_each_td(td, i) { - rt[i] = mtime_since(&td->start, &tv); - if (td_read(td) && td->io_bytes[DDIR_READ]) - td->ts.runtime[DDIR_READ] += rt[i]; - if (td_write(td) && td->io_bytes[DDIR_WRITE]) - td->ts.runtime[DDIR_WRITE] += rt[i]; - if (td_trim(td) && td->io_bytes[DDIR_TRIM]) - td->ts.runtime[DDIR_TRIM] += rt[i]; - td->update_rusage = 1; td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ]; td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE]; td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM]; td->ts.total_run_time = mtime_since(&td->epoch, &tv); + + rt[i] = mtime_since(&td->start, &tv); + if (td_read(td) && td->ts.io_bytes[DDIR_READ]) + td->ts.runtime[DDIR_READ] += rt[i]; + if (td_write(td) && td->ts.io_bytes[DDIR_WRITE]) + td->ts.runtime[DDIR_WRITE] += rt[i]; + if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM]) + td->ts.runtime[DDIR_TRIM] += rt[i]; } for_each_td(td, i) { @@ -1715,11 +1880,11 @@ void __show_running_run_stats(void) __show_run_stats(); for_each_td(td, i) { - if (td_read(td) && td->io_bytes[DDIR_READ]) + if (td_read(td) && td->ts.io_bytes[DDIR_READ]) td->ts.runtime[DDIR_READ] -= rt[i]; - if (td_write(td) && td->io_bytes[DDIR_WRITE]) + if (td_write(td) && td->ts.io_bytes[DDIR_WRITE]) td->ts.runtime[DDIR_WRITE] -= rt[i]; - if 
(td_trim(td) && td->io_bytes[DDIR_TRIM]) + if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM]) td->ts.runtime[DDIR_TRIM] -= rt[i]; } @@ -1803,58 +1968,184 @@ static inline void add_stat_sample(struct io_stat *is, unsigned long data) is->samples++; } -static void __add_log_sample(struct io_log *iolog, unsigned long val, +/* + * Return a struct io_logs, which is added to the tail of the log + * list for 'iolog'. + */ +static struct io_logs *get_new_log(struct io_log *iolog) +{ + size_t new_size, new_samples; + struct io_logs *cur_log; + + /* + * Cap the size at MAX_LOG_ENTRIES, so we don't keep doubling + * forever + */ + if (!iolog->cur_log_max) + new_samples = DEF_LOG_ENTRIES; + else { + new_samples = iolog->cur_log_max * 2; + if (new_samples > MAX_LOG_ENTRIES) + new_samples = MAX_LOG_ENTRIES; + } + + new_size = new_samples * log_entry_sz(iolog); + + cur_log = smalloc(sizeof(*cur_log)); + if (cur_log) { + INIT_FLIST_HEAD(&cur_log->list); + cur_log->log = malloc(new_size); + if (cur_log->log) { + cur_log->nr_samples = 0; + cur_log->max_samples = new_samples; + flist_add_tail(&cur_log->list, &iolog->io_logs); + iolog->cur_log_max = new_samples; + return cur_log; + } + sfree(cur_log); + } + + return NULL; +} + +/* + * Add and return a new log chunk, or return current log if big enough + */ +static struct io_logs *regrow_log(struct io_log *iolog) +{ + struct io_logs *cur_log; + int i; + + if (!iolog || iolog->disabled) + goto disable; + + cur_log = iolog_cur_log(iolog); + if (!cur_log) { + cur_log = get_new_log(iolog); + if (!cur_log) + return NULL; + } + + if (cur_log->nr_samples < cur_log->max_samples) + return cur_log; + + /* + * No room for a new sample. If we're compressing on the fly, flush + * out the current chunk + */ + if (iolog->log_gz) { + if (iolog_cur_flush(iolog, cur_log)) { + log_err("fio: failed flushing iolog! Will stop logging.\n"); + return NULL; + } + } + + /* + * Get a new log array, and add to our list + */ + cur_log = get_new_log(iolog); + if (!cur_log) { + log_err("fio: failed extending iolog! Will stop logging.\n"); + return NULL; + } + + if (!iolog->pending || !iolog->pending->nr_samples) + return cur_log; + + /* + * Flush pending items to new log + */ + for (i = 0; i < iolog->pending->nr_samples; i++) { + struct io_sample *src, *dst; + + src = get_sample(iolog, iolog->pending, i); + dst = get_sample(iolog, cur_log, i); + memcpy(dst, src, log_entry_sz(iolog)); + } + cur_log->nr_samples = iolog->pending->nr_samples; + + iolog->pending->nr_samples = 0; + return cur_log; +disable: + if (iolog) + iolog->disabled = true; + return NULL; +} + +void regrow_logs(struct thread_data *td) +{ + regrow_log(td->slat_log); + regrow_log(td->clat_log); + regrow_log(td->clat_hist_log); + regrow_log(td->lat_log); + regrow_log(td->bw_log); + regrow_log(td->iops_log); + td->flags &= ~TD_F_REGROW_LOGS; +} + +static struct io_logs *get_cur_log(struct io_log *iolog) +{ + struct io_logs *cur_log; + + cur_log = iolog_cur_log(iolog); + if (!cur_log) { + cur_log = get_new_log(iolog); + if (!cur_log) + return NULL; + } + + if (cur_log->nr_samples < cur_log->max_samples) + return cur_log; + + /* + * Out of space. If we're in IO offload mode, or we're not doing + * per unit logging (hence logging happens outside of the IO thread + * as well), add a new log chunk inline. If we're doing inline + * submissions, flag 'td' as needing a log regrow and we'll take + * care of it on the submission side. 
+ */ + if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD || + !per_unit_log(iolog)) + return regrow_log(iolog); + + iolog->td->flags |= TD_F_REGROW_LOGS; + assert(iolog->pending->nr_samples < iolog->pending->max_samples); + return iolog->pending; +} + +static void __add_log_sample(struct io_log *iolog, union io_sample_data data, enum fio_ddir ddir, unsigned int bs, unsigned long t, uint64_t offset) { - uint64_t nr_samples = iolog->nr_samples; - struct io_sample *s; + struct io_logs *cur_log; if (iolog->disabled) return; - - if (!iolog->nr_samples) + if (flist_empty(&iolog->io_logs)) iolog->avg_last = t; - if (iolog->nr_samples == iolog->max_samples) { - size_t new_size; - void *new_log; + cur_log = get_cur_log(iolog); + if (cur_log) { + struct io_sample *s; - new_size = 2 * iolog->max_samples * log_entry_sz(iolog); + s = get_sample(iolog, cur_log, cur_log->nr_samples); - if (iolog->log_gz && (new_size > iolog->log_gz)) { - if (iolog_flush(iolog, 0)) { - log_err("fio: failed flushing iolog! Will stop logging.\n"); - iolog->disabled = 1; - return; - } - nr_samples = iolog->nr_samples; - } else { - new_log = realloc(iolog->log, new_size); - if (!new_log) { - log_err("fio: failed extending iolog! Will stop logging.\n"); - iolog->disabled = 1; - return; - } - iolog->log = new_log; - iolog->max_samples <<= 1; - } - } - - s = get_sample(iolog, nr_samples); + s->data = data; + s->time = t + (iolog->td ? iolog->td->unix_epoch : 0); + io_sample_set_ddir(iolog, s, ddir); + s->bs = bs; - s->val = val; - s->time = t; - io_sample_set_ddir(iolog, s, ddir); - s->bs = bs; + if (iolog->log_offset) { + struct io_sample_offset *so = (void *) s; - if (iolog->log_offset) { - struct io_sample_offset *so = (void *) s; + so->offset = offset; + } - so->offset = offset; + cur_log->nr_samples++; + return; } - iolog->nr_samples++; + iolog->disabled = true; } static inline void reset_io_stat(struct io_stat *ios) @@ -1899,45 +2190,45 @@ void reset_io_stats(struct thread_data *td) } } -static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed) +static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir, + unsigned long elapsed, bool log_max) { /* * Note an entry in the log. Use the mean from the logged samples, * making sure to properly round up. Only write a log entry if we * had actual samples done. 
*/ - if (iolog->avg_window[DDIR_READ].samples) { - unsigned long mr; + if (iolog->avg_window[ddir].samples) { + union io_sample_data data; - mr = iolog->avg_window[DDIR_READ].mean.u.f + 0.50; - __add_log_sample(iolog, mr, DDIR_READ, 0, elapsed, 0); - } - if (iolog->avg_window[DDIR_WRITE].samples) { - unsigned long mw; + if (log_max) + data.val = iolog->avg_window[ddir].max_val; + else + data.val = iolog->avg_window[ddir].mean.u.f + 0.50; - mw = iolog->avg_window[DDIR_WRITE].mean.u.f + 0.50; - __add_log_sample(iolog, mw, DDIR_WRITE, 0, elapsed, 0); + __add_log_sample(iolog, data, ddir, 0, elapsed, 0); } - if (iolog->avg_window[DDIR_TRIM].samples) { - unsigned long mw; - mw = iolog->avg_window[DDIR_TRIM].mean.u.f + 0.50; - __add_log_sample(iolog, mw, DDIR_TRIM, 0, elapsed, 0); - } + reset_io_stat(&iolog->avg_window[ddir]); +} + +static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed, + bool log_max) +{ + int ddir; - reset_io_stat(&iolog->avg_window[DDIR_READ]); - reset_io_stat(&iolog->avg_window[DDIR_WRITE]); - reset_io_stat(&iolog->avg_window[DDIR_TRIM]); + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) + __add_stat_to_log(iolog, ddir, elapsed, log_max); } -static void add_log_sample(struct thread_data *td, struct io_log *iolog, - unsigned long val, enum fio_ddir ddir, +static long add_log_sample(struct thread_data *td, struct io_log *iolog, + union io_sample_data data, enum fio_ddir ddir, unsigned int bs, uint64_t offset) { unsigned long elapsed, this_window; if (!ddir_rw(ddir)) - return; + return 0; elapsed = mtime_since_now(&td->epoch); @@ -1945,48 +2236,55 @@ static void add_log_sample(struct thread_data *td, struct io_log *iolog, * If no time averaging, just add the log sample. */ if (!iolog->avg_msec) { - __add_log_sample(iolog, val, ddir, bs, elapsed, offset); - return; + __add_log_sample(iolog, data, ddir, bs, elapsed, offset); + return 0; } /* * Add the sample. If the time period has passed, then * add that entry to the log and clear. */ - add_stat_sample(&iolog->avg_window[ddir], val); + add_stat_sample(&iolog->avg_window[ddir], data.val); /* * If period hasn't passed, adding the above sample is all we * need to do. 
*/ this_window = elapsed - iolog->avg_last; - if (this_window < iolog->avg_msec) - return; + if (elapsed < iolog->avg_last) + return iolog->avg_last - elapsed; + else if (this_window < iolog->avg_msec) { + int diff = iolog->avg_msec - this_window; + + if (inline_log(iolog) || diff > LOG_MSEC_SLACK) + return diff; + } - _add_stat_to_log(iolog, elapsed); + _add_stat_to_log(iolog, elapsed, td->o.log_max != 0); - iolog->avg_last = elapsed; + iolog->avg_last = elapsed - (this_window - iolog->avg_msec); + return iolog->avg_msec; } -void finalize_logs(struct thread_data *td) +void finalize_logs(struct thread_data *td, bool unit_logs) { unsigned long elapsed; elapsed = mtime_since_now(&td->epoch); - if (td->clat_log) - _add_stat_to_log(td->clat_log, elapsed); - if (td->slat_log) - _add_stat_to_log(td->slat_log, elapsed); - if (td->lat_log) - _add_stat_to_log(td->lat_log, elapsed); - if (td->bw_log) - _add_stat_to_log(td->bw_log, elapsed); - if (td->iops_log) - _add_stat_to_log(td->iops_log, elapsed); + if (td->clat_log && unit_logs) + _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0); + if (td->slat_log && unit_logs) + _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0); + if (td->lat_log && unit_logs) + _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0); + if (td->bw_log && (unit_logs == per_unit_log(td->bw_log))) + _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0); + if (td->iops_log && (unit_logs == per_unit_log(td->iops_log))) + _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0); } -void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs) +void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned int bs) { struct io_log *iolog; @@ -1994,7 +2292,7 @@ void add_agg_sample(unsigned long val, enum fio_ddir ddir, unsigned int bs) return; iolog = agg_io_log[ddir]; - __add_log_sample(iolog, val, ddir, bs, mtime_since_genesis(), 0); + __add_log_sample(iolog, data, ddir, bs, mtime_since_genesis(), 0); } static void add_clat_percentile_sample(struct thread_stat *ts, @@ -2009,21 +2307,59 @@ static void add_clat_percentile_sample(struct thread_stat *ts, void add_clat_sample(struct thread_data *td, enum fio_ddir ddir, unsigned long usec, unsigned int bs, uint64_t offset) { + unsigned long elapsed, this_window; struct thread_stat *ts = &td->ts; - - if (!ddir_rw(ddir)) - return; + struct io_log *iolog = td->clat_hist_log; td_io_u_lock(td); add_stat_sample(&ts->clat_stat[ddir], usec); if (td->clat_log) - add_log_sample(td, td->clat_log, usec, ddir, bs, offset); + add_log_sample(td, td->clat_log, sample_val(usec), ddir, bs, + offset); if (ts->clat_percentiles) add_clat_percentile_sample(ts, usec, ddir); + if (iolog && iolog->hist_msec) { + struct io_hist *hw = &iolog->hist_window[ddir]; + + hw->samples++; + elapsed = mtime_since_now(&td->epoch); + if (!hw->hist_last) + hw->hist_last = elapsed; + this_window = elapsed - hw->hist_last; + + if (this_window >= iolog->hist_msec) { + unsigned int *io_u_plat; + struct io_u_plat_entry *dst; + + /* + * Make a byte-for-byte copy of the latency histogram + * stored in td->ts.io_u_plat[ddir], recording it in a + * log sample. Note that the matching call to free() is + * located in iolog.c after printing this sample to the + * log file. 
+ */ + io_u_plat = (unsigned int *) td->ts.io_u_plat[ddir]; + dst = malloc(sizeof(struct io_u_plat_entry)); + memcpy(&(dst->io_u_plat), io_u_plat, + FIO_IO_U_PLAT_NR * sizeof(unsigned int)); + flist_add(&dst->list, &hw->list); + __add_log_sample(iolog, sample_plat(dst), ddir, bs, + elapsed, offset); + + /* + * Update the last time we recorded as being now, minus + * any drift in time we encountered before actually + * making the record. + */ + hw->hist_last = elapsed - (this_window - iolog->hist_msec); + hw->samples = 0; + } + } + td_io_u_unlock(td); } @@ -2040,7 +2376,7 @@ void add_slat_sample(struct thread_data *td, enum fio_ddir ddir, add_stat_sample(&ts->slat_stat[ddir], usec); if (td->slat_log) - add_log_sample(td, td->slat_log, usec, ddir, bs, offset); + add_log_sample(td, td->slat_log, sample_val(usec), ddir, bs, offset); td_io_u_unlock(td); } @@ -2058,30 +2394,55 @@ void add_lat_sample(struct thread_data *td, enum fio_ddir ddir, add_stat_sample(&ts->lat_stat[ddir], usec); if (td->lat_log) - add_log_sample(td, td->lat_log, usec, ddir, bs, offset); + add_log_sample(td, td->lat_log, sample_val(usec), ddir, bs, + offset); td_io_u_unlock(td); } -void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs, - struct timeval *t) +void add_bw_sample(struct thread_data *td, struct io_u *io_u, + unsigned int bytes, unsigned long spent) +{ + struct thread_stat *ts = &td->ts; + unsigned long rate; + + if (spent) + rate = bytes * 1000 / spent; + else + rate = 0; + + td_io_u_lock(td); + + add_stat_sample(&ts->bw_stat[io_u->ddir], rate); + + if (td->bw_log) + add_log_sample(td, td->bw_log, sample_val(rate), io_u->ddir, + bytes, io_u->offset); + + td->stat_io_bytes[io_u->ddir] = td->this_io_bytes[io_u->ddir]; + td_io_u_unlock(td); +} + +static int add_bw_samples(struct thread_data *td, struct timeval *t) { struct thread_stat *ts = &td->ts; unsigned long spent, rate; + enum fio_ddir ddir; + unsigned int next, next_log; - if (!ddir_rw(ddir)) - return; + next_log = td->o.bw_avg_time; spent = mtime_since(&td->bw_sample_time, t); - if (spent < td->o.bw_avg_time) - return; + if (spent < td->o.bw_avg_time && + td->o.bw_avg_time - spent >= LOG_MSEC_SLACK) + return td->o.bw_avg_time - spent; td_io_u_lock(td); /* * Compute both read and write rates for the interval. 
*/ - for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) { + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) { uint64_t delta; delta = td->this_io_bytes[ddir] - td->stat_io_bytes[ddir]; @@ -2089,41 +2450,74 @@ void add_bw_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs, continue; /* No entries for interval */ if (spent) - rate = delta * 1000 / spent / 1024; + rate = delta * 1000 / spent / 1024; /* KiB/s */ else rate = 0; add_stat_sample(&ts->bw_stat[ddir], rate); - if (td->bw_log) - add_log_sample(td, td->bw_log, rate, ddir, bs, 0); + if (td->bw_log) { + unsigned int bs = 0; + + if (td->o.min_bs[ddir] == td->o.max_bs[ddir]) + bs = td->o.min_bs[ddir]; + + next = add_log_sample(td, td->bw_log, sample_val(rate), + ddir, bs, 0); + next_log = min(next_log, next); + } td->stat_io_bytes[ddir] = td->this_io_bytes[ddir]; } - fio_gettime(&td->bw_sample_time, NULL); + timeval_add_msec(&td->bw_sample_time, td->o.bw_avg_time); + + td_io_u_unlock(td); + + if (spent <= td->o.bw_avg_time) + return min(next_log, td->o.bw_avg_time); + + next = td->o.bw_avg_time - (1 + spent - td->o.bw_avg_time); + return min(next, next_log); +} + +void add_iops_sample(struct thread_data *td, struct io_u *io_u, + unsigned int bytes) +{ + struct thread_stat *ts = &td->ts; + + td_io_u_lock(td); + + add_stat_sample(&ts->iops_stat[io_u->ddir], 1); + + if (td->iops_log) + add_log_sample(td, td->iops_log, sample_val(1), io_u->ddir, + bytes, io_u->offset); + + td->stat_io_blocks[io_u->ddir] = td->this_io_blocks[io_u->ddir]; td_io_u_unlock(td); } -void add_iops_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs, - struct timeval *t) +static int add_iops_samples(struct thread_data *td, struct timeval *t) { struct thread_stat *ts = &td->ts; unsigned long spent, iops; + enum fio_ddir ddir; + unsigned int next, next_log; - if (!ddir_rw(ddir)) - return; + next_log = td->o.iops_avg_time; spent = mtime_since(&td->iops_sample_time, t); - if (spent < td->o.iops_avg_time) - return; + if (spent < td->o.iops_avg_time && + td->o.iops_avg_time - spent >= LOG_MSEC_SLACK) + return td->o.iops_avg_time - spent; td_io_u_lock(td); /* * Compute both read and write rates for the interval. 
*/ - for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) { + for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) { uint64_t delta; delta = td->this_io_blocks[ddir] - td->stat_io_blocks[ddir]; @@ -2137,14 +2531,62 @@ void add_iops_sample(struct thread_data *td, enum fio_ddir ddir, unsigned int bs add_stat_sample(&ts->iops_stat[ddir], iops); - if (td->iops_log) - add_log_sample(td, td->iops_log, iops, ddir, bs, 0); + if (td->iops_log) { + unsigned int bs = 0; + + if (td->o.min_bs[ddir] == td->o.max_bs[ddir]) + bs = td->o.min_bs[ddir]; + + next = add_log_sample(td, td->iops_log, + sample_val(iops), ddir, bs, 0); + next_log = min(next_log, next); + } td->stat_io_blocks[ddir] = td->this_io_blocks[ddir]; } - fio_gettime(&td->iops_sample_time, NULL); + timeval_add_msec(&td->iops_sample_time, td->o.iops_avg_time); + td_io_u_unlock(td); + + if (spent <= td->o.iops_avg_time) + return min(next_log, td->o.iops_avg_time); + + next = td->o.iops_avg_time - (1 + spent - td->o.iops_avg_time); + return min(next, next_log); +} + +/* + * Returns msecs to next event + */ +int calc_log_samples(void) +{ + struct thread_data *td; + unsigned int next = ~0U, tmp; + struct timeval now; + int i; + + fio_gettime(&now, NULL); + + for_each_td(td, i) { + if (in_ramp_time(td) || + !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) { + next = min(td->o.iops_avg_time, td->o.bw_avg_time); + continue; + } + if (td->bw_log && !per_unit_log(td->bw_log)) { + tmp = add_bw_samples(td, &now); + if (tmp < next) + next = tmp; + } + if (td->iops_log && !per_unit_log(td->iops_log)) { + tmp = add_iops_samples(td, &now); + if (tmp < next) + next = tmp; + } + } + + return next == ~0U ? 0 : next; } void stat_init(void) @@ -2167,8 +2609,7 @@ void stat_exit(void) */ void show_running_run_stats(void) { - helper_do_stat = 1; - pthread_cond_signal(&helper_cond); + helper_do_stat(); } uint32_t *io_u_block_info(struct thread_data *td, struct io_u *io_u)
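
Notes on the mechanisms this patch introduces, each with a small standalone C sketch. The sketches are illustrative only and are not part of the fio tree.

The reworked summary lines in show_group_stats()/show_ddir_status() print every figure twice, once in the unit base the user asked for and once in the alternate base, by calling num2str() with i2p and again with !i2p, and the bandwidth labels are now spelled strictly (KiB vs kB, Kibit vs kbit, MiB vs Mibit). The real formatting lives in num2str(); this fragment only shows the two divisors behind each number pair:

    #include <stdio.h>

    /* Illustrative only: fio's num2str() does the real formatting. */
    static void show_both(unsigned long long bytes_per_sec)
    {
        printf("bw=%.1fKiB/s (%.1fkB/s)\n",
               bytes_per_sec / 1024.0,   /* IEC, power-of-2 */
               bytes_per_sec / 1000.0);  /* SI, power-of-10 */
    }

    int main(void)
    {
        show_both(1048576ULL);  /* prints bw=1024.0KiB/s (1048.6kB/s) */
        return 0;
    }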
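
The new steady-state JSON section emits ss_bw_data/ss_iops_data in chronological order by walking them as a ring buffer: it picks the logical start (backing up one slot when the buffer is full but steady state was never attained, because ss_head then points at the second-oldest element, as the comment in the hunk notes) and wraps with modulo. The modulo walk in isolation, on toy data:

    #include <stdio.h>

    #define DUR 5

    int main(void)
    {
        /* toy buffer: writer last wrote slot 1, so slot 2 is oldest */
        unsigned long bw[DUR] = { 40, 50, 10, 20, 30 };
        int head = 2;   /* like ss_head: index of the oldest sample */

        for (int i = 0; i < DUR; i++) {
            int k = (head + i) % DUR;   /* chronological order */
            printf("sample %d: %lu\n", i, bw[k]);
        }
        return 0;
    }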
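
The largest structural change replaces the realloc()-grown sample array with a list of io_logs chunks: get_new_log() sizes each new chunk at double the previous one until MAX_LOG_ENTRIES, so growth stays amortized while no single allocation becomes unbounded. A sketch of just that size progression; DEF_LOG_ENTRIES and MAX_LOG_ENTRIES are defined in fio's iolog.h, and the values below are placeholders:

    #include <stdio.h>

    #define DEF_LOG_ENTRIES 1024            /* placeholder value */
    #define MAX_LOG_ENTRIES (1024 * 1024)   /* placeholder value */

    static size_t next_chunk_samples(size_t cur_log_max)
    {
        size_t new_samples;

        if (!cur_log_max)
            new_samples = DEF_LOG_ENTRIES;  /* first chunk */
        else {
            new_samples = cur_log_max * 2;  /* double... */
            if (new_samples > MAX_LOG_ENTRIES)
                new_samples = MAX_LOG_ENTRIES;  /* ...capped */
        }
        return new_samples;
    }

    int main(void)
    {
        size_t cur = 0;

        for (int i = 0; i < 12; i++) {
            cur = next_chunk_samples(cur);
            printf("chunk %2d: %zu samples\n", i, cur);
        }
        return 0;
    }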
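
add_log_sample() now returns the number of milliseconds until the current averaging window closes, and a timer-driven (non-inline) log may flush up to LOG_MSEC_SLACK early; this is what lets calc_log_samples() tell the helper thread exactly how long it can sleep instead of polling. A self-contained sketch of that window arithmetic, including the avg_last adjustment that keeps late or early flushes from shifting later windows (the 500 ms window and the struct name are invented for the demo):

    #include <stdbool.h>
    #include <stdio.h>

    #define LOG_MSEC_SLACK 10

    struct window {
        unsigned long avg_last; /* window start, ms since epoch */
        unsigned long avg_msec; /* window length, ms */
    };

    /* Returns ms until the caller should look again; flushes if due. */
    static long window_tick(struct window *w, unsigned long elapsed,
                            bool inline_log)
    {
        unsigned long this_window;

        /* an early flush can leave avg_last ahead of 'now' */
        if (elapsed < w->avg_last)
            return w->avg_last - elapsed;

        this_window = elapsed - w->avg_last;
        if (this_window < w->avg_msec) {
            long diff = w->avg_msec - this_window;

            /* per-I/O logs never flush early; timer-driven logs
             * tolerate up to LOG_MSEC_SLACK of earliness */
            if (inline_log || diff > LOG_MSEC_SLACK)
                return diff;
        }

        printf("flush at t=%lums\n", elapsed);
        /* compensate over/undershoot so window boundaries stay on
         * an avg_msec grid (unsigned wraparound makes this exact) */
        w->avg_last = elapsed - (this_window - w->avg_msec);
        return w->avg_msec;
    }

    int main(void)
    {
        struct window w = { .avg_last = 0, .avg_msec = 500 };
        unsigned long t[] = { 120, 495, 503, 1210 };

        for (unsigned i = 0; i < sizeof(t) / sizeof(t[0]); i++)
            printf("t=%lums -> next check in %ldms\n",
                   t[i], window_tick(&w, t[i], false));
        return 0;
    }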
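
Relatedly, add_bw_samples()/add_iops_samples() now advance bw_sample_time/iops_sample_time with timeval_add_msec() rather than re-stamping them via fio_gettime(), so one late wakeup does not delay every subsequent sampling deadline. timeval_add_msec() itself is defined elsewhere in the tree, not in this diff; the version below is a plausible reading, for illustration only:

    #include <stdio.h>
    #include <sys/time.h>

    /* Assumed helper: advance a deadline by a fixed step instead of
     * re-reading the clock, so the cadence cannot drift. */
    static void timeval_add_msec(struct timeval *tv, unsigned int msec)
    {
        unsigned long usec = tv->tv_usec + 1000UL * msec;

        tv->tv_sec += usec / 1000000;
        tv->tv_usec = usec % 1000000;
    }

    int main(void)
    {
        struct timeval next = { 0, 0 };

        /* even if each wakeup were late, deadlines stay 500ms apart */
        for (int i = 0; i < 3; i++) {
            timeval_add_msec(&next, 500);
            printf("deadline %d: %ld.%06ld\n", i + 1,
                   (long) next.tv_sec, (long) next.tv_usec);
        }
        return 0;
    }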