X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=stat.c;h=abdbb0e3fb9ba226382c1e511ccbe54e4b449aa7;hp=98ab63893900bcc09ba9a5be785dceffa30b1fff;hb=fd5d733fa34;hpb=971caeb177d3bc4f65fa31381bbfb83710bfc690

diff --git a/stat.c b/stat.c
index 98ab6389..abdbb0e3 100644
--- a/stat.c
+++ b/stat.c
@@ -1,10 +1,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <sys/time.h>
-#include <sys/types.h>
 #include <sys/stat.h>
-#include <dirent.h>
-#include <libgen.h>
 #include <math.h>
 
 #include "fio.h"
@@ -17,6 +14,7 @@
 #include "lib/output_buffer.h"
 #include "helper_thread.h"
 #include "smalloc.h"
+#include "zbd.h"
 
 #define LOG_MSEC_SLACK 1
 
@@ -365,7 +363,7 @@ static void stat_calc_lat(struct thread_stat *ts, double *dst,
  * To keep the terse format unaltered, add all of the ns latency
  * buckets to the first us latency bucket
  */
-void stat_calc_lat_nu(struct thread_stat *ts, double *io_u_lat_u)
+static void stat_calc_lat_nu(struct thread_stat *ts, double *io_u_lat_u)
 {
         unsigned long ntotal = 0, total = ddir_rw_sum(ts->total_io_u);
         int i;
@@ -422,7 +420,7 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
         unsigned long runt;
         unsigned long long min, max, bw, iops;
         double mean, dev;
-        char *io_p, *bw_p, *bw_p_alt, *iops_p;
+        char *io_p, *bw_p, *bw_p_alt, *iops_p, *zbd_w_st = NULL;
         int i2p;
 
         if (ddir_sync(ddir)) {
@@ -453,12 +451,16 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
 
         iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
         iops_p = num2str(iops, ts->sig_figs, 1, 0, N2S_NONE);
+        if (ddir == DDIR_WRITE)
+                zbd_w_st = zbd_write_status(ts);
 
-        log_buf(out, "  %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)\n",
+        log_buf(out, "  %s: IOPS=%s, BW=%s (%s)(%s/%llumsec)%s\n",
                         rs->unified_rw_rep ? "mixed" : str[ddir],
                         iops_p, bw_p, bw_p_alt, io_p,
-                        (unsigned long long) ts->runtime[ddir]);
+                        (unsigned long long) ts->runtime[ddir],
+                        zbd_w_st ? zbd_w_st : "");
+        free(zbd_w_st);
 
         free(io_p);
         free(bw_p);
         free(bw_p_alt);
@@ -622,8 +624,8 @@ static int block_state_category(int block_state)
 
 static int compare_block_infos(const void *bs1, const void *bs2)
 {
-        uint32_t block1 = *(uint32_t *)bs1;
-        uint32_t block2 = *(uint32_t *)bs2;
+        uint64_t block1 = *(uint64_t *)bs1;
+        uint64_t block2 = *(uint64_t *)bs2;
         int state1 = BLOCK_INFO_STATE(block1);
         int state2 = BLOCK_INFO_STATE(block2);
         int bscat1 = block_state_category(state1);
@@ -673,7 +675,6 @@ static int calc_block_percentiles(int nr_block_infos, uint32_t *block_infos,
         if (len > 1)
                 qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
 
-        nr_uninit = 0;
         /* Start only after the uninit entries end */
         for (nr_uninit = 0;
              nr_uninit < nr_block_infos
@@ -1209,7 +1210,7 @@ static void show_thread_status_terse_all(struct thread_stat *ts,
                 log_buf(out, ";%3.2f%%", io_u_lat_m[i]);
 
         /* disk util stats, if any */
-        if (ver >= 3)
+        if (ver >= 3 && is_running_backend())
                 show_disk_util(1, NULL, out);
 
         /* Additional output if continue_on_error set - default off*/
@@ -1292,19 +1293,15 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts,
                 usr_cpu = 0;
                 sys_cpu = 0;
         }
+        json_object_add_value_int(root, "job_runtime", ts->total_run_time);
         json_object_add_value_float(root, "usr_cpu", usr_cpu);
         json_object_add_value_float(root, "sys_cpu", sys_cpu);
         json_object_add_value_int(root, "ctx", ts->ctx);
         json_object_add_value_int(root, "majf", ts->majf);
         json_object_add_value_int(root, "minf", ts->minf);
-
-        /* Calc % distribution of IO depths, usecond, msecond latency */
+        /* Calc % distribution of IO depths */
         stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
-        stat_calc_lat_n(ts, io_u_lat_n);
-        stat_calc_lat_u(ts, io_u_lat_u);
-        stat_calc_lat_m(ts, io_u_lat_m);
-
         tmp = json_create_object();
         json_object_add_value_object(root, "iodepth_level", tmp);
         /* Only show fixed 7 I/O depth levels*/
@@ -1317,6 +1314,44 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts,
                 json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
         }
 
+        /* Calc % distribution of submit IO depths */
+        stat_calc_dist(ts->io_u_submit, ts->total_submit, io_u_dist);
+        tmp = json_create_object();
+        json_object_add_value_object(root, "iodepth_submit", tmp);
+        /* Only show fixed 7 I/O depth levels*/
+        for (i = 0; i < 7; i++) {
+                char name[20];
+                if (i == 0)
+                        snprintf(name, 20, "0");
+                else if (i < 6)
+                        snprintf(name, 20, "%d", 1 << (i+1));
+                else
+                        snprintf(name, 20, ">=%d", 1 << i);
+                json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
+        }
+
+        /* Calc % distribution of completion IO depths */
+        stat_calc_dist(ts->io_u_complete, ts->total_complete, io_u_dist);
+        tmp = json_create_object();
+        json_object_add_value_object(root, "iodepth_complete", tmp);
+        /* Only show fixed 7 I/O depth levels*/
+        for (i = 0; i < 7; i++) {
+                char name[20];
+                if (i == 0)
+                        snprintf(name, 20, "0");
+                else if (i < 6)
+                        snprintf(name, 20, "%d", 1 << (i+1));
+                else
+                        snprintf(name, 20, ">=%d", 1 << i);
+                json_object_add_value_float(tmp, (const char *)name, io_u_dist[i]);
+        }
+
+        /* Calc % distribution of nsecond, usecond, msecond latency */
+        stat_calc_dist(ts->io_u_map, ddir_rw_sum(ts->total_io_u), io_u_dist);
+        stat_calc_lat_n(ts, io_u_lat_n);
+        stat_calc_lat_u(ts, io_u_lat_u);
+        stat_calc_lat_m(ts, io_u_lat_m);
+
         /* Nanosecond latency */
         tmp = json_create_object();
         json_object_add_value_object(root, "latency_ns", tmp);
@@ -1402,7 +1437,7 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts,
         if (ts->ss_dur) {
                 struct json_object *data;
                 struct json_array *iops, *bw;
-                int i, j, k;
+                int j, k, l;
                 char ss_buf[64];
 
                 snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
@@ -1438,8 +1473,8 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts,
                         j = ts->ss_head;
                 else
                         j = ts->ss_head == 0 ? ts->ss_dur - 1 : ts->ss_head - 1;
-                for (i = 0; i < ts->ss_dur; i++) {
-                        k = (j + i) % ts->ss_dur;
+                for (l = 0; l < ts->ss_dur; l++) {
+                        k = (j + l) % ts->ss_dur;
                         json_array_add_value_int(bw, ts->ss_bw_data[k]);
                         json_array_add_value_int(iops, ts->ss_iops_data[k]);
                 }
@@ -1625,6 +1660,7 @@ void sum_thread_stats(struct thread_stat *dst, struct thread_stat *src,
         dst->total_run_time += src->total_run_time;
         dst->total_submit += src->total_submit;
         dst->total_complete += src->total_complete;
+        dst->nr_zone_resets += src->nr_zone_resets;
 }
 
 void init_group_run_stat(struct group_run_stats *gs)
 {
@@ -1892,6 +1928,8 @@ void __show_run_stats(void)
                 if (is_backend) {
                         fio_server_send_job_options(opt_lists[i], i);
                         fio_server_send_ts(ts, rs);
+                        if (output_format & FIO_OUTPUT_TERSE)
+                                show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]);
                 } else {
                         if (output_format & FIO_OUTPUT_TERSE)
                                 show_thread_status_terse(ts, rs, &output[__FIO_OUTPUT_TERSE]);
@@ -1938,19 +1976,14 @@ void __show_run_stats(void)
                 buf_output_free(out);
         }
 
+        fio_idle_prof_cleanup();
+
         log_info_flush();
         free(runstats);
         free(threadstats);
         free(opt_lists);
 }
 
-void show_run_stats(void)
-{
-        fio_sem_down(stat_sem);
-        __show_run_stats();
-        fio_sem_up(stat_sem);
-}
-
 void __show_running_run_stats(void)
 {
         struct thread_data *td;
@@ -2216,17 +2249,19 @@ static struct io_logs *get_cur_log(struct io_log *iolog)
          * submissions, flag 'td' as needing a log regrow and we'll take
          * care of it on the submission side.
          */
-        if (iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD ||
+        if ((iolog->td && iolog->td->o.io_submit_mode == IO_MODE_OFFLOAD) ||
             !per_unit_log(iolog))
                 return regrow_log(iolog);
 
-        iolog->td->flags |= TD_F_REGROW_LOGS;
-        assert(iolog->pending->nr_samples < iolog->pending->max_samples);
+        if (iolog->td)
+                iolog->td->flags |= TD_F_REGROW_LOGS;
+        if (iolog->pending)
+                assert(iolog->pending->nr_samples < iolog->pending->max_samples);
         return iolog->pending;
 }
 
 static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
-                             enum fio_ddir ddir, unsigned int bs,
+                             enum fio_ddir ddir, unsigned long long bs,
                              unsigned long t, uint64_t offset)
 {
         struct io_logs *cur_log;
@@ -2308,6 +2343,7 @@ void reset_io_stats(struct thread_data *td)
 
         ts->total_submit = 0;
         ts->total_complete = 0;
+        ts->nr_zone_resets = 0;
 }
 
 static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
@@ -2344,7 +2380,7 @@ static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
 
 static unsigned long add_log_sample(struct thread_data *td,
                                     struct io_log *iolog,
                                     union io_sample_data data,
-                                    enum fio_ddir ddir, unsigned int bs,
+                                    enum fio_ddir ddir, unsigned long long bs,
                                     uint64_t offset)
 {
         unsigned long elapsed, this_window;
@@ -2406,7 +2442,7 @@ void finalize_logs(struct thread_data *td, bool unit_logs)
                 _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
 }
 
-void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned int bs)
+void add_agg_sample(union io_sample_data data, enum fio_ddir ddir, unsigned long long bs)
 {
         struct io_log *iolog;
 
@@ -2436,7 +2472,8 @@ static void add_clat_percentile_sample(struct thread_stat *ts,
 }
 
 void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
-                     unsigned long long nsec, unsigned int bs, uint64_t offset)
+                     unsigned long long nsec, unsigned long long bs,
+                     uint64_t offset)
 {
         unsigned long elapsed, this_window;
         struct thread_stat *ts = &td->ts;
@@ -2495,7 +2532,7 @@ void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
 }
 
 void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
-                     unsigned long usec, unsigned int bs, uint64_t offset)
+                     unsigned long usec, unsigned long long bs, uint64_t offset)
 {
         struct thread_stat *ts = &td->ts;
 
@@ -2513,7 +2550,8 @@ void add_slat_sample(struct thread_data *td, enum fio_ddir ddir,
 }
 
 void add_lat_sample(struct thread_data *td, enum fio_ddir ddir,
-                    unsigned long long nsec, unsigned int bs, uint64_t offset)
+                    unsigned long long nsec, unsigned long long bs,
+                    uint64_t offset)
 {
         struct thread_stat *ts = &td->ts;
 
@@ -2596,7 +2634,7 @@ static int __add_samples(struct thread_data *td, struct timespec *parent_tv,
                 add_stat_sample(&stat[ddir], rate);
 
                 if (log) {
-                        unsigned int bs = 0;
+                        unsigned long long bs = 0;
 
                         if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
                                 bs = td->o.min_bs[ddir];
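
Illustration only, not part of the patch above: the new "iodepth_submit" and "iodepth_complete" JSON objects reuse fio's fixed 7-level depth buckets, and the small standalone C sketch below reproduces that bucket-label mapping so the resulting JSON keys are easy to predict. It uses nothing from fio itself (plain libc), and the printed labels come out as 0, 4, 8, 16, 32, 64 and >=64.

/*
 * Standalone sketch of the depth-bucket labelling used by the new
 * iodepth_submit/iodepth_complete JSON objects.  Compile with any C
 * compiler; no fio headers required.
 */
#include <stdio.h>

int main(void)
{
        int i;

        for (i = 0; i < 7; i++) {
                char name[20];

                if (i == 0)
                        snprintf(name, sizeof(name), "0");
                else if (i < 6)
                        snprintf(name, sizeof(name), "%d", 1 << (i + 1));
                else
                        snprintf(name, sizeof(name), ">=%d", 1 << i);
                /* prints: 0, 4, 8, 16, 32, 64, >=64 */
                printf("bucket %d -> %s\n", i, name);
        }
        return 0;
}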