Revert "ioengines: Make td_io_queue print log_err when got error "
[fio.git] / stat.c
diff --git a/stat.c b/stat.c
index 24fc679fa1c39d7f79db798c9175b58f559139a8..b98e8b27c3b0a70b2cb220411406665ac96d68f5 100644
--- a/stat.c
+++ b/stat.c
@@ -1,5 +1,6 @@
 #include <stdio.h>
 #include <string.h>
+#include <stdlib.h>
 #include <sys/time.h>
 #include <sys/stat.h>
 #include <math.h>
@@ -377,7 +378,7 @@ void show_group_stats(struct group_run_stats *rs, struct buf_output *out)
                free(maxalt);
        }
 
-       /* Need to aggregate statisitics to show mixed values */
+       /* Need to aggregate statistics to show mixed values */
        if (rs->unified_rw_rep == UNIFIED_BOTH)
                show_mixed_group_stats(rs, out);
 }
@@ -554,7 +555,7 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
 
        iops = (1000 * (uint64_t)ts->total_io_u[ddir]) / runt;
        iops_p = num2str(iops, ts->sig_figs, 1, 0, N2S_NONE);
-       if (ddir == DDIR_WRITE)
+       if (ddir == DDIR_WRITE || ddir == DDIR_TRIM)
                post_st = zbd_write_status(ts);
        else if (ddir == DDIR_READ && ts->cachehit && ts->cachemiss) {
                uint64_t total;
@@ -589,17 +590,19 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
        /* Only print per prio stats if there are >= 2 prios with samples */
        if (get_nr_prios_with_samples(ts, ddir) >= 2) {
                for (i = 0; i < ts->nr_clat_prio[ddir]; i++) {
-                       if (calc_lat(&ts->clat_prio[ddir][i].clat_stat, &min,
-                                    &max, &mean, &dev)) {
-                               char buf[64];
+                       char buf[64];
 
-                               snprintf(buf, sizeof(buf),
-                                        "%s prio %u/%u",
-                                        clat_type,
-                                        ts->clat_prio[ddir][i].ioprio >> 13,
-                                        ts->clat_prio[ddir][i].ioprio & 7);
-                               display_lat(buf, min, max, mean, dev, out);
-                       }
+                       if (!calc_lat(&ts->clat_prio[ddir][i].clat_stat, &min,
+                                     &max, &mean, &dev))
+                               continue;
+
+                       snprintf(buf, sizeof(buf),
+                                "%s prio %u/%u/%u",
+                                clat_type,
+                                ioprio_class(ts->clat_prio[ddir][i].ioprio),
+                                ioprio(ts->clat_prio[ddir][i].ioprio),
+                                ioprio_hint(ts->clat_prio[ddir][i].ioprio));
+                       display_lat(buf, min, max, mean, dev, out);
                }
        }
 
@@ -631,20 +634,23 @@ static void show_ddir_status(struct group_run_stats *rs, struct thread_stat *ts,
                /* Only print per prio stats if there are >= 2 prios with samples */
                if (get_nr_prios_with_samples(ts, ddir) >= 2) {
                        for (i = 0; i < ts->nr_clat_prio[ddir]; i++) {
-                               uint64_t prio_samples = ts->clat_prio[ddir][i].clat_stat.samples;
-
-                               if (prio_samples > 0) {
-                                       snprintf(prio_name, sizeof(prio_name),
-                                                "%s prio %u/%u (%.2f%% of IOs)",
-                                                clat_type,
-                                                ts->clat_prio[ddir][i].ioprio >> 13,
-                                                ts->clat_prio[ddir][i].ioprio & 7,
-                                                100. * (double) prio_samples / (double) samples);
-                                       show_clat_percentiles(ts->clat_prio[ddir][i].io_u_plat,
-                                                             prio_samples, ts->percentile_list,
-                                                             ts->percentile_precision,
-                                                             prio_name, out);
-                               }
+                               uint64_t prio_samples =
+                                       ts->clat_prio[ddir][i].clat_stat.samples;
+
+                               if (!prio_samples)
+                                       continue;
+
+                               snprintf(prio_name, sizeof(prio_name),
+                                        "%s prio %u/%u/%u (%.2f%% of IOs)",
+                                        clat_type,
+                                        ioprio_class(ts->clat_prio[ddir][i].ioprio),
+                                        ioprio(ts->clat_prio[ddir][i].ioprio),
+                                        ioprio_hint(ts->clat_prio[ddir][i].ioprio),
+                                        100. * (double) prio_samples / (double) samples);
+                               show_clat_percentiles(ts->clat_prio[ddir][i].io_u_plat,
+                                               prio_samples, ts->percentile_list,
+                                               ts->percentile_precision,
+                                               prio_name, out);
                        }
                }
        }
@@ -953,11 +959,13 @@ static void show_agg_stats(struct disk_util_agg *agg, int terse,
                return;
 
        if (!terse) {
-               log_buf(out, ", aggrios=%llu/%llu, aggrmerge=%llu/%llu, "
-                        "aggrticks=%llu/%llu, aggrin_queue=%llu, "
-                        "aggrutil=%3.2f%%",
+               log_buf(out, ", aggrios=%llu/%llu, aggsectors=%llu/%llu, "
+                        "aggrmerge=%llu/%llu, aggrticks=%llu/%llu, "
+                        "aggrin_queue=%llu, aggrutil=%3.2f%%",
                        (unsigned long long) agg->ios[0] / agg->slavecount,
                        (unsigned long long) agg->ios[1] / agg->slavecount,
+                       (unsigned long long) agg->sectors[0] / agg->slavecount,
+                       (unsigned long long) agg->sectors[1] / agg->slavecount,
                        (unsigned long long) agg->merges[0] / agg->slavecount,
                        (unsigned long long) agg->merges[1] / agg->slavecount,
                        (unsigned long long) agg->ticks[0] / agg->slavecount,
@@ -1026,11 +1034,14 @@ void print_disk_util(struct disk_util_stat *dus, struct disk_util_agg *agg,
                if (agg->slavecount)
                        log_buf(out, "  ");
 
-               log_buf(out, "  %s: ios=%llu/%llu, merge=%llu/%llu, "
-                        "ticks=%llu/%llu, in_queue=%llu, util=%3.2f%%",
+               log_buf(out, "  %s: ios=%llu/%llu, sectors=%llu/%llu, "
+                       "merge=%llu/%llu, ticks=%llu/%llu, in_queue=%llu, "
+                       "util=%3.2f%%",
                                dus->name,
                                (unsigned long long) dus->s.ios[0],
                                (unsigned long long) dus->s.ios[1],
+                               (unsigned long long) dus->s.sectors[0],
+                               (unsigned long long) dus->s.sectors[1],
                                (unsigned long long) dus->s.merges[0],
                                (unsigned long long) dus->s.merges[1],
                                (unsigned long long) dus->s.ticks[0],
@@ -1077,6 +1088,8 @@ void json_array_add_disk_util(struct disk_util_stat *dus,
        json_object_add_value_string(obj, "name", (const char *)dus->name);
        json_object_add_value_int(obj, "read_ios", dus->s.ios[0]);
        json_object_add_value_int(obj, "write_ios", dus->s.ios[1]);
+       json_object_add_value_int(obj, "read_sectors", dus->s.sectors[0]);
+       json_object_add_value_int(obj, "write_sectors", dus->s.sectors[1]);
        json_object_add_value_int(obj, "read_merges", dus->s.merges[0]);
        json_object_add_value_int(obj, "write_merges", dus->s.merges[1]);
        json_object_add_value_int(obj, "read_ticks", dus->s.ticks[0]);
@@ -1094,6 +1107,10 @@ void json_array_add_disk_util(struct disk_util_stat *dus,
                                agg->ios[0] / agg->slavecount);
        json_object_add_value_int(obj, "aggr_write_ios",
                                agg->ios[1] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_read_sectors",
+                               agg->sectors[0] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_write_sectors",
+                               agg->sectors[1] / agg->slavecount);
        json_object_add_value_int(obj, "aggr_read_merges",
                                agg->merges[0] / agg->slavecount);
        json_object_add_value_int(obj, "aggr_write_merge",
@@ -1507,22 +1524,26 @@ static void add_ddir_status_json(struct thread_stat *ts,
                json_object_add_value_array(dir_object, "prios", array);
 
                for (i = 0; i < ts->nr_clat_prio[ddir]; i++) {
-                       if (ts->clat_prio[ddir][i].clat_stat.samples > 0) {
-                               struct json_object *obj = json_create_object();
-                               unsigned long long class, level;
-
-                               class = ts->clat_prio[ddir][i].ioprio >> 13;
-                               json_object_add_value_int(obj, "prioclass", class);
-                               level = ts->clat_prio[ddir][i].ioprio & 7;
-                               json_object_add_value_int(obj, "prio", level);
-
-                               tmp_object = add_ddir_lat_json(ts,
-                                                              ts->clat_percentiles | ts->lat_percentiles,
-                                                              &ts->clat_prio[ddir][i].clat_stat,
-                                                              ts->clat_prio[ddir][i].io_u_plat);
-                               json_object_add_value_object(obj, obj_name, tmp_object);
-                               json_array_add_value_object(array, obj);
-                       }
+                       struct json_object *obj;
+
+                       if (!ts->clat_prio[ddir][i].clat_stat.samples)
+                               continue;
+
+                       obj = json_create_object();
+
+                       json_object_add_value_int(obj, "prioclass",
+                               ioprio_class(ts->clat_prio[ddir][i].ioprio));
+                       json_object_add_value_int(obj, "prio",
+                               ioprio(ts->clat_prio[ddir][i].ioprio));
+                       json_object_add_value_int(obj, "priohint",
+                               ioprio_hint(ts->clat_prio[ddir][i].ioprio));
+
+                       tmp_object = add_ddir_lat_json(ts,
+                                       ts->clat_percentiles | ts->lat_percentiles,
+                                       &ts->clat_prio[ddir][i].clat_stat,
+                                       ts->clat_prio[ddir][i].io_u_plat);
+                       json_object_add_value_object(obj, obj_name, tmp_object);
+                       json_array_add_value_object(array, obj);
                }
        }
 
@@ -1691,6 +1712,7 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts,
        root = json_create_object();
        json_object_add_value_string(root, "jobname", ts->name);
        json_object_add_value_int(root, "groupid", ts->groupid);
+       json_object_add_value_int(root, "job_start", ts->job_start);
        json_object_add_value_int(root, "error", ts->error);
 
        /* ETA Info */
@@ -1698,6 +1720,7 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts,
        if (je) {
                json_object_add_value_int(root, "eta", je->eta_sec);
                json_object_add_value_int(root, "elapsed", je->elapsed_sec);
+               free(je);
        }
 
        if (opt_list)
@@ -1867,6 +1890,7 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts,
                struct json_array *iops, *bw;
                int j, k, l;
                char ss_buf[64];
+               int intervals = ts->ss_dur / (ss_check_interval / 1000L);
 
                snprintf(ss_buf, sizeof(ss_buf), "%s%s:%f%s",
                        ts->ss_state & FIO_SS_IOPS ? "iops" : "bw",
@@ -1900,9 +1924,9 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts,
                if ((ts->ss_state & FIO_SS_ATTAINED) || !(ts->ss_state & FIO_SS_BUFFER_FULL))
                        j = ts->ss_head;
                else
-                       j = ts->ss_head == 0 ? ts->ss_dur - 1 : ts->ss_head - 1;
-               for (l = 0; l < ts->ss_dur; l++) {
-                       k = (j + l) % ts->ss_dur;
+                       j = ts->ss_head == 0 ? intervals - 1 : ts->ss_head - 1;
+               for (l = 0; l < intervals; l++) {
+                       k = (j + l) % intervals;
                        json_array_add_value_int(bw, ts->ss_bw_data[k]);
                        json_array_add_value_int(iops, ts->ss_iops_data[k]);
                }
@@ -2041,6 +2065,9 @@ void free_clat_prio_stats(struct thread_stat *ts)
 {
        enum fio_ddir ddir;
 
+       if (!ts)
+               return;
+
        for (ddir = 0; ddir < DDIR_RWDIR_CNT; ddir++) {
                sfree(ts->clat_prio[ddir]);
                ts->clat_prio[ddir] = NULL;
@@ -2361,7 +2388,6 @@ void init_thread_stat(struct thread_stat *ts)
 
 static void init_per_prio_stats(struct thread_stat *threadstats, int nr_ts)
 {
-       struct thread_data *td;
        struct thread_stat *ts;
        int i, j, last_ts, idx;
        enum fio_ddir ddir;
@@ -2375,7 +2401,7 @@ static void init_per_prio_stats(struct thread_stat *threadstats, int nr_ts)
         * store a 1 in ts->disable_prio_stat, and then do an additional
         * loop at the end where we invert the ts->disable_prio_stat values.
         */
-       for_each_td(td, i) {
+       for_each_td(td) {
                if (!td->o.stats)
                        continue;
                if (idx &&
@@ -2402,7 +2428,7 @@ static void init_per_prio_stats(struct thread_stat *threadstats, int nr_ts)
                }
 
                idx++;
-       }
+       } end_for_each();
 
        /* Loop through all dst threadstats and fixup the values. */
        for (i = 0; i < nr_ts; i++) {
@@ -2414,7 +2440,6 @@ static void init_per_prio_stats(struct thread_stat *threadstats, int nr_ts)
 void __show_run_stats(void)
 {
        struct group_run_stats *runstats, *rs;
-       struct thread_data *td;
        struct thread_stat *threadstats, *ts;
        int i, j, k, nr_ts, last_ts, idx;
        bool kb_base_warned = false;
@@ -2435,7 +2460,7 @@ void __show_run_stats(void)
         */
        nr_ts = 0;
        last_ts = -1;
-       for_each_td(td, i) {
+       for_each_td(td) {
                if (!td->o.group_reporting) {
                        nr_ts++;
                        continue;
@@ -2447,7 +2472,7 @@ void __show_run_stats(void)
 
                last_ts = td->groupid;
                nr_ts++;
-       }
+       } end_for_each();
 
        threadstats = malloc(nr_ts * sizeof(struct thread_stat));
        opt_lists = malloc(nr_ts * sizeof(struct flist_head *));
@@ -2462,7 +2487,7 @@ void __show_run_stats(void)
        j = 0;
        last_ts = -1;
        idx = 0;
-       for_each_td(td, i) {
+       for_each_td(td) {
                if (!td->o.stats)
                        continue;
                if (idx && (!td->o.group_reporting ||
@@ -2502,6 +2527,7 @@ void __show_run_stats(void)
                         */
                        ts->thread_number = td->thread_number;
                        ts->groupid = td->groupid;
+                       ts->job_start = td->job_start;
 
                        /*
                         * first pid in group, not very useful...
@@ -2564,7 +2590,7 @@ void __show_run_stats(void)
                }
                else
                        ts->ss_dur = ts->ss_state = 0;
-       }
+       } end_for_each();
 
        for (i = 0; i < nr_ts; i++) {
                unsigned long long bw;
@@ -2717,33 +2743,34 @@ void __show_run_stats(void)
 
 int __show_running_run_stats(void)
 {
-       struct thread_data *td;
        unsigned long long *rt;
        struct timespec ts;
-       int i;
 
        fio_sem_down(stat_sem);
 
        rt = malloc(thread_number * sizeof(unsigned long long));
        fio_gettime(&ts, NULL);
 
-       for_each_td(td, i) {
+       for_each_td(td) {
+               if (td->runstate >= TD_EXITED)
+                       continue;
+
                td->update_rusage = 1;
                for_each_rw_ddir(ddir) {
                        td->ts.io_bytes[ddir] = td->io_bytes[ddir];
                }
                td->ts.total_run_time = mtime_since(&td->epoch, &ts);
 
-               rt[i] = mtime_since(&td->start, &ts);
+               rt[__td_index] = mtime_since(&td->start, &ts);
                if (td_read(td) && td->ts.io_bytes[DDIR_READ])
-                       td->ts.runtime[DDIR_READ] += rt[i];
+                       td->ts.runtime[DDIR_READ] += rt[__td_index];
                if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
-                       td->ts.runtime[DDIR_WRITE] += rt[i];
+                       td->ts.runtime[DDIR_WRITE] += rt[__td_index];
                if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
-                       td->ts.runtime[DDIR_TRIM] += rt[i];
-       }
+                       td->ts.runtime[DDIR_TRIM] += rt[__td_index];
+       } end_for_each();
 
-       for_each_td(td, i) {
+       for_each_td(td) {
                if (td->runstate >= TD_EXITED)
                        continue;
                if (td->rusage_sem) {
@@ -2751,18 +2778,21 @@ int __show_running_run_stats(void)
                        fio_sem_down(td->rusage_sem);
                }
                td->update_rusage = 0;
-       }
+       } end_for_each();
 
        __show_run_stats();
 
-       for_each_td(td, i) {
+       for_each_td(td) {
+               if (td->runstate >= TD_EXITED)
+                       continue;
+
                if (td_read(td) && td->ts.io_bytes[DDIR_READ])
-                       td->ts.runtime[DDIR_READ] -= rt[i];
+                       td->ts.runtime[DDIR_READ] -= rt[__td_index];
                if (td_write(td) && td->ts.io_bytes[DDIR_WRITE])
-                       td->ts.runtime[DDIR_WRITE] -= rt[i];
+                       td->ts.runtime[DDIR_WRITE] -= rt[__td_index];
                if (td_trim(td) && td->ts.io_bytes[DDIR_TRIM])
-                       td->ts.runtime[DDIR_TRIM] -= rt[i];
-       }
+                       td->ts.runtime[DDIR_TRIM] -= rt[__td_index];
+       } end_for_each();
 
        free(rt);
        fio_sem_up(stat_sem);
@@ -2859,7 +2889,10 @@ static struct io_logs *get_new_log(struct io_log *iolog)
         * forever
         */
        if (!iolog->cur_log_max) {
-               new_samples = iolog->td->o.log_entries;
+               if (iolog->td)
+                       new_samples = iolog->td->o.log_entries;
+               else
+                       new_samples = DEF_LOG_ENTRIES;
        } else {
                new_samples = iolog->cur_log_max * 2;
                if (new_samples > MAX_LOG_ENTRIES)
@@ -3017,7 +3050,9 @@ static void __add_log_sample(struct io_log *iolog, union io_sample_data data,
                s = get_sample(iolog, cur_log, cur_log->nr_samples);
 
                s->data = data;
-               s->time = t + (iolog->td ? iolog->td->unix_epoch : 0);
+               s->time = t;
+               if (iolog->td && iolog->td->o.log_alternate_epoch)
+                       s->time += iolog->td->alternate_epoch;
                io_sample_set_ddir(iolog, s, ddir);
                s->bs = bs;
                s->priority = priority;
@@ -3114,7 +3149,7 @@ void reset_io_stats(struct thread_data *td)
 }
 
 static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
-                             unsigned long elapsed, bool log_max)
+                             unsigned long elapsed, int log_max)
 {
        /*
         * Note an entry in the log. Use the mean from the logged samples,
@@ -3124,10 +3159,16 @@ static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
        if (iolog->avg_window[ddir].samples) {
                union io_sample_data data;
 
-               if (log_max)
-                       data.val = iolog->avg_window[ddir].max_val;
-               else
-                       data.val = iolog->avg_window[ddir].mean.u.f + 0.50;
+               if (log_max == IO_LOG_SAMPLE_AVG) {
+                       data.val.val0 = iolog->avg_window[ddir].mean.u.f + 0.50;
+                       data.val.val1 = 0;
+               } else if (log_max == IO_LOG_SAMPLE_MAX) {
+                       data.val.val0 = iolog->avg_window[ddir].max_val;
+                       data.val.val1 = 0;
+               } else {
+                       data.val.val0 = iolog->avg_window[ddir].mean.u.f + 0.50;
+                       data.val.val1 = iolog->avg_window[ddir].max_val;
+               }
 
                __add_log_sample(iolog, data, ddir, 0, elapsed, 0, 0);
        }
@@ -3136,7 +3177,7 @@ static void __add_stat_to_log(struct io_log *iolog, enum fio_ddir ddir,
 }
 
 static void _add_stat_to_log(struct io_log *iolog, unsigned long elapsed,
-                            bool log_max)
+                            int log_max)
 {
        enum fio_ddir ddir;
 
@@ -3170,7 +3211,7 @@ static unsigned long add_log_sample(struct thread_data *td,
         * Add the sample. If the time period has passed, then
         * add that entry to the log and clear.
         */
-       add_stat_sample(&iolog->avg_window[ddir], data.val);
+       add_stat_sample(&iolog->avg_window[ddir], data.val.val0);
 
        /*
         * If period hasn't passed, adding the above sample is all we
@@ -3186,7 +3227,7 @@ static unsigned long add_log_sample(struct thread_data *td,
                        return diff;
        }
 
-       __add_stat_to_log(iolog, ddir, elapsed, td->o.log_max != 0);
+       __add_stat_to_log(iolog, ddir, elapsed, td->o.log_max);
 
        iolog->avg_last[ddir] = elapsed - (elapsed % iolog->avg_msec);
 
@@ -3200,15 +3241,15 @@ void finalize_logs(struct thread_data *td, bool unit_logs)
        elapsed = mtime_since_now(&td->epoch);
 
        if (td->clat_log && unit_logs)
-               _add_stat_to_log(td->clat_log, elapsed, td->o.log_max != 0);
+               _add_stat_to_log(td->clat_log, elapsed, td->o.log_max);
        if (td->slat_log && unit_logs)
-               _add_stat_to_log(td->slat_log, elapsed, td->o.log_max != 0);
+               _add_stat_to_log(td->slat_log, elapsed, td->o.log_max);
        if (td->lat_log && unit_logs)
-               _add_stat_to_log(td->lat_log, elapsed, td->o.log_max != 0);
+               _add_stat_to_log(td->lat_log, elapsed, td->o.log_max);
        if (td->bw_log && (unit_logs == per_unit_log(td->bw_log)))
-               _add_stat_to_log(td->bw_log, elapsed, td->o.log_max != 0);
+               _add_stat_to_log(td->bw_log, elapsed, td->o.log_max);
        if (td->iops_log && (unit_logs == per_unit_log(td->iops_log)))
-               _add_stat_to_log(td->iops_log, elapsed, td->o.log_max != 0);
+               _add_stat_to_log(td->iops_log, elapsed, td->o.log_max);
 }
 
 void add_agg_sample(union io_sample_data data, enum fio_ddir ddir,
@@ -3535,26 +3576,38 @@ static int add_iops_samples(struct thread_data *td, struct timespec *t)
                                td->ts.iops_stat, td->iops_log, false);
 }
 
+static bool td_in_logging_state(struct thread_data *td)
+{
+       if (in_ramp_time(td))
+               return false;
+
+       switch(td->runstate) {
+       case TD_RUNNING:
+       case TD_VERIFYING:
+       case TD_FINISHING:
+       case TD_EXITED:
+               return true;
+       default:
+               return false;
+       }
+}
+
 /*
  * Returns msecs to next event
  */
 int calc_log_samples(void)
 {
-       struct thread_data *td;
        unsigned int next = ~0U, tmp = 0, next_mod = 0, log_avg_msec_min = -1U;
        struct timespec now;
-       int i;
        long elapsed_time = 0;
 
-       fio_gettime(&now, NULL);
-
-       for_each_td(td, i) {
-               elapsed_time = mtime_since_now(&td->epoch);
+       for_each_td(td) {
+               fio_gettime(&now, NULL);
+               elapsed_time = mtime_since(&td->epoch, &now);
 
                if (!td->o.stats)
                        continue;
-               if (in_ramp_time(td) ||
-                   !(td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING)) {
+               if (!td_in_logging_state(td)) {
                        next = min(td->o.iops_avg_time, td->o.bw_avg_time);
                        continue;
                }
@@ -3575,7 +3628,7 @@ int calc_log_samples(void)
 
                if (tmp < next)
                        next = tmp;
-       }
+       } end_for_each();
 
        /* if log_avg_msec_min has not been changed, set it to 0 */
        if (log_avg_msec_min == -1U)