stat: Remove more superfluous casts
[fio.git] / stat.c
diff --git a/stat.c b/stat.c
index 33637900df622c2fae42841f19fc0ed5cd9ebdc1..a3736c9a8c67554cb55d30ee251084494d23e048 100644
--- a/stat.c
+++ b/stat.c
@@ -15,6 +15,7 @@
 #include "helper_thread.h"
 #include "smalloc.h"
 #include "zbd.h"
+#include "oslib/asprintf.h"
 
 #define LOG_MSEC_SLACK 1
 
@@ -158,7 +159,7 @@ unsigned int calc_clat_percentiles(uint64_t *io_u_plat, unsigned long long nr,
         * isn't a worry. Also note that this does not work for NaN values.
         */
        if (len > 1)
-               qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
+               qsort(plist, len, sizeof(plist[0]), double_cmp);
 
        ovals = malloc(len * sizeof(*ovals));
        if (!ovals)
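
This hunk and the next one are what the subject line refers to: in C, any object pointer converts implicitly to void *, so casting plist before handing it to qsort() adds nothing. A standalone sketch of the same call shape (illustrative names, not fio's code; the comparator is only a stand-in for fio's double_cmp and, like the comment above notes, is not NaN-safe):

#include <stdio.h>
#include <stdlib.h>

/* Comparator with the signature qsort() requires; compares doubles and,
 * per the comment in the hunk above, does not handle NaN values. */
static int cmp_double(const void *a, const void *b)
{
	double x = *(const double *)a;
	double y = *(const double *)b;

	return (x > y) - (x < y);
}

int main(void)
{
	double plist[] = { 99.9, 50.0, 95.0 };
	size_t len = sizeof(plist) / sizeof(plist[0]);

	/* No (void *) cast needed: double * converts to void * implicitly. */
	qsort(plist, len, sizeof(plist[0]), cmp_double);

	for (size_t i = 0; i < len; i++)
		printf("%g\n", plist[i]);
	return 0;
}
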
@@ -683,7 +684,7 @@ static int calc_block_percentiles(int nr_block_infos, uint32_t *block_infos,
         * isn't a worry. Also note that this does not work for NaN values.
         */
        if (len > 1)
-               qsort((void *)plist, len, sizeof(plist[0]), double_cmp);
+               qsort(plist, len, sizeof(plist[0]), double_cmp);
 
        /* Start only after the uninit entries end */
        for (nr_uninit = 0;
@@ -785,6 +786,218 @@ static void show_ss_normal(struct thread_stat *ts, struct buf_output *out)
        free(p2);
 }
 
+static void show_agg_stats(struct disk_util_agg *agg, int terse,
+                          struct buf_output *out)
+{
+       if (!agg->slavecount)
+               return;
+
+       if (!terse) {
+               log_buf(out, ", aggrios=%llu/%llu, aggrmerge=%llu/%llu, "
+                        "aggrticks=%llu/%llu, aggrin_queue=%llu, "
+                        "aggrutil=%3.2f%%",
+                       (unsigned long long) agg->ios[0] / agg->slavecount,
+                       (unsigned long long) agg->ios[1] / agg->slavecount,
+                       (unsigned long long) agg->merges[0] / agg->slavecount,
+                       (unsigned long long) agg->merges[1] / agg->slavecount,
+                       (unsigned long long) agg->ticks[0] / agg->slavecount,
+                       (unsigned long long) agg->ticks[1] / agg->slavecount,
+                       (unsigned long long) agg->time_in_queue / agg->slavecount,
+                       agg->max_util.u.f);
+       } else {
+               log_buf(out, ";slaves;%llu;%llu;%llu;%llu;%llu;%llu;%llu;%3.2f%%",
+                       (unsigned long long) agg->ios[0] / agg->slavecount,
+                       (unsigned long long) agg->ios[1] / agg->slavecount,
+                       (unsigned long long) agg->merges[0] / agg->slavecount,
+                       (unsigned long long) agg->merges[1] / agg->slavecount,
+                       (unsigned long long) agg->ticks[0] / agg->slavecount,
+                       (unsigned long long) agg->ticks[1] / agg->slavecount,
+                       (unsigned long long) agg->time_in_queue / agg->slavecount,
+                       agg->max_util.u.f);
+       }
+}
+
+static void aggregate_slaves_stats(struct disk_util *masterdu)
+{
+       struct disk_util_agg *agg = &masterdu->agg;
+       struct disk_util_stat *dus;
+       struct flist_head *entry;
+       struct disk_util *slavedu;
+       double util;
+
+       flist_for_each(entry, &masterdu->slaves) {
+               slavedu = flist_entry(entry, struct disk_util, slavelist);
+               dus = &slavedu->dus;
+               agg->ios[0] += dus->s.ios[0];
+               agg->ios[1] += dus->s.ios[1];
+               agg->merges[0] += dus->s.merges[0];
+               agg->merges[1] += dus->s.merges[1];
+               agg->sectors[0] += dus->s.sectors[0];
+               agg->sectors[1] += dus->s.sectors[1];
+               agg->ticks[0] += dus->s.ticks[0];
+               agg->ticks[1] += dus->s.ticks[1];
+               agg->time_in_queue += dus->s.time_in_queue;
+               agg->slavecount++;
+
+               util = (double) (100 * dus->s.io_ticks / (double) slavedu->dus.s.msec);
+               /* System utilization is the utilization of the
+                * component with the highest utilization.
+                */
+               if (util > agg->max_util.u.f)
+                       agg->max_util.u.f = util;
+
+       }
+
+       if (agg->max_util.u.f > 100.0)
+               agg->max_util.u.f = 100.0;
+}
+
+void print_disk_util(struct disk_util_stat *dus, struct disk_util_agg *agg,
+                    int terse, struct buf_output *out)
+{
+       double util = 0;
+
+       if (dus->s.msec)
+               util = (double) 100 * dus->s.io_ticks / (double) dus->s.msec;
+       if (util > 100.0)
+               util = 100.0;
+
+       if (!terse) {
+               if (agg->slavecount)
+                       log_buf(out, "  ");
+
+               log_buf(out, "  %s: ios=%llu/%llu, merge=%llu/%llu, "
+                        "ticks=%llu/%llu, in_queue=%llu, util=%3.2f%%",
+                               dus->name,
+                               (unsigned long long) dus->s.ios[0],
+                               (unsigned long long) dus->s.ios[1],
+                               (unsigned long long) dus->s.merges[0],
+                               (unsigned long long) dus->s.merges[1],
+                               (unsigned long long) dus->s.ticks[0],
+                               (unsigned long long) dus->s.ticks[1],
+                               (unsigned long long) dus->s.time_in_queue,
+                               util);
+       } else {
+               log_buf(out, ";%s;%llu;%llu;%llu;%llu;%llu;%llu;%llu;%3.2f%%",
+                               dus->name,
+                               (unsigned long long) dus->s.ios[0],
+                               (unsigned long long) dus->s.ios[1],
+                               (unsigned long long) dus->s.merges[0],
+                               (unsigned long long) dus->s.merges[1],
+                               (unsigned long long) dus->s.ticks[0],
+                               (unsigned long long) dus->s.ticks[1],
+                               (unsigned long long) dus->s.time_in_queue,
+                               util);
+       }
+
+       /*
+        * If the device has slaves, aggregate the stats for
+        * those slave devices also.
+        */
+       show_agg_stats(agg, terse, out);
+
+       if (!terse)
+               log_buf(out, "\n");
+}
+
+void json_array_add_disk_util(struct disk_util_stat *dus,
+               struct disk_util_agg *agg, struct json_array *array)
+{
+       struct json_object *obj;
+       double util = 0;
+
+       if (dus->s.msec)
+               util = (double) 100 * dus->s.io_ticks / (double) dus->s.msec;
+       if (util > 100.0)
+               util = 100.0;
+
+       obj = json_create_object();
+       json_array_add_value_object(array, obj);
+
+       json_object_add_value_string(obj, "name", dus->name);
+       json_object_add_value_int(obj, "read_ios", dus->s.ios[0]);
+       json_object_add_value_int(obj, "write_ios", dus->s.ios[1]);
+       json_object_add_value_int(obj, "read_merges", dus->s.merges[0]);
+       json_object_add_value_int(obj, "write_merges", dus->s.merges[1]);
+       json_object_add_value_int(obj, "read_ticks", dus->s.ticks[0]);
+       json_object_add_value_int(obj, "write_ticks", dus->s.ticks[1]);
+       json_object_add_value_int(obj, "in_queue", dus->s.time_in_queue);
+       json_object_add_value_float(obj, "util", util);
+
+       /*
+        * If the device has slaves, aggregate the stats for
+        * those slave devices also.
+        */
+       if (!agg->slavecount)
+               return;
+       json_object_add_value_int(obj, "aggr_read_ios",
+                               agg->ios[0] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_write_ios",
+                               agg->ios[1] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_read_merges",
+                               agg->merges[0] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_write_merge",
+                               agg->merges[1] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_read_ticks",
+                               agg->ticks[0] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_write_ticks",
+                               agg->ticks[1] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_in_queue",
+                               agg->time_in_queue / agg->slavecount);
+       json_object_add_value_float(obj, "aggr_util", agg->max_util.u.f);
+}
+
+static void json_object_add_disk_utils(struct json_object *obj,
+                                      struct flist_head *head)
+{
+       struct json_array *array = json_create_array();
+       struct flist_head *entry;
+       struct disk_util *du;
+
+       json_object_add_value_array(obj, "disk_util", array);
+
+       flist_for_each(entry, head) {
+               du = flist_entry(entry, struct disk_util, list);
+
+               aggregate_slaves_stats(du);
+               json_array_add_disk_util(&du->dus, &du->agg, array);
+       }
+}
+
+void show_disk_util(int terse, struct json_object *parent,
+                   struct buf_output *out)
+{
+       struct flist_head *entry;
+       struct disk_util *du;
+       bool do_json;
+
+       if (!is_running_backend())
+               return;
+
+       if (flist_empty(&disk_list)) {
+               return;
+       }
+
+       if ((output_format & FIO_OUTPUT_JSON) && parent)
+               do_json = true;
+       else
+               do_json = false;
+
+       if (!terse && !do_json)
+               log_buf(out, "\nDisk stats (read/write):\n");
+
+       if (do_json)
+               json_object_add_disk_utils(parent, &disk_list);
+       else if (output_format & ~(FIO_OUTPUT_JSON | FIO_OUTPUT_JSON_PLUS)) {
+               flist_for_each(entry, &disk_list) {
+                       du = flist_entry(entry, struct disk_util, list);
+
+                       aggregate_slaves_stats(du);
+                       print_disk_util(&du->dus, &du->agg, terse, out);
+               }
+       }
+}
+
 static void show_thread_status_normal(struct thread_stat *ts,
                                      struct group_run_stats *rs,
                                      struct buf_output *out)
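
For the aggregation added above: the aggr* counters reported per master device are per-slave averages (each sum divided by slavecount), while aggrutil is the highest per-slave utilization, capped at 100%. A small standalone illustration with made-up numbers (not fio code):

#include <stdio.h>

int main(void)
{
	/* Two hypothetical slave devices behind one master. */
	unsigned long long read_ios[2] = { 1000, 3000 };
	double util[2] = { 42.5, 87.3 };	/* per-slave utilization, % */
	int slavecount = 2;

	unsigned long long aggr_read_ios =
		(read_ios[0] + read_ios[1]) / slavecount;	/* 2000 */
	double aggrutil = util[0] > util[1] ? util[0] : util[1];

	if (aggrutil > 100.0)
		aggrutil = 100.0;	/* mirrors the cap in aggregate_slaves_stats() */

	printf("aggrios=%llu, aggrutil=%3.2f%%\n", aggr_read_ios, aggrutil);
	return 0;
}
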
@@ -1095,7 +1308,7 @@ static void add_ddir_status_json(struct thread_stat *ts,
                json_object_add_value_object(tmp_object, "percentile", percentile_object);
        for (i = 0; i < len; i++) {
                snprintf(buf, sizeof(buf), "%f", ts->percentile_list[i].u.f);
-               json_object_add_value_int(percentile_object, (const char *)buf, ovals[i]);
+               json_object_add_value_int(percentile_object, buf, ovals[i]);
        }
 
        if (output_format & FIO_OUTPUT_JSON_PLUS) {
@@ -1107,12 +1320,12 @@ static void add_ddir_status_json(struct thread_stat *ts,
                        if (ddir_rw(ddir)) {
                                if (ts->io_u_plat[ddir][i]) {
                                        snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
-                                       json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_plat[ddir][i]);
+                                       json_object_add_value_int(clat_bins_object, buf, ts->io_u_plat[ddir][i]);
                                }
                        } else {
                                if (ts->io_u_sync_plat[i]) {
                                        snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
-                                       json_object_add_value_int(clat_bins_object, (const char *)buf, ts->io_u_sync_plat[i]);
+                                       json_object_add_value_int(clat_bins_object, buf, ts->io_u_sync_plat[i]);
                                }
                        }
                }
@@ -1448,7 +1661,7 @@ static struct json_object *show_thread_status_json(struct thread_stat *ts,
                                snprintf(buf, sizeof(buf), "%f",
                                         ts->percentile_list[i].u.f);
                                json_object_add_value_int(percentile_object,
-                                                         (const char *)buf,
+                                                         buf,
                                                          percentiles[i]);
                        }
 
@@ -2123,6 +2336,9 @@ static int check_status_file(void)
        }
        if (temp_dir == NULL)
                temp_dir = "/tmp";
+#ifdef __COVERITY__
+       __coverity_tainted_data_sanitize__(temp_dir);
+#endif
 
        snprintf(fio_status_file_path, sizeof(fio_status_file_path), "%s/%s", temp_dir, FIO_STATUS_FILE);
 
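
The __COVERITY__ block is only seen by Coverity's analysis build; a regular compile drops it in the preprocessor. The primitive appears to mark the environment-derived string as sanitized so taint checkers stop flagging its later use. A standalone sketch of the same guard pattern (the analyzer behavior described here is an assumption, and this is not fio code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char path[4096];
	const char *dir = getenv("TMPDIR");

	if (dir == NULL)
		dir = "/tmp";
#ifdef __COVERITY__
	/* Assumed semantics: tell the analyzer this env-derived string has
	 * been validated, so it is no longer treated as tainted data. */
	__coverity_tainted_data_sanitize__(dir);
#endif
	snprintf(path, sizeof(path), "%s/%s", dir, "status-file");
	printf("%s\n", path);
	return 0;
}
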
@@ -2580,7 +2796,7 @@ void add_clat_sample(struct thread_data *td, enum fio_ddir ddir,
                        io_u_plat = (uint64_t *) td->ts.io_u_plat[ddir];
                        dst = malloc(sizeof(struct io_u_plat_entry));
                        memcpy(&(dst->io_u_plat), io_u_plat,
-                               FIO_IO_U_PLAT_NR * sizeof(unsigned int));
+                               FIO_IO_U_PLAT_NR * sizeof(uint64_t));
                        flist_add(&dst->list, &hw->list);
                        __add_log_sample(iolog, sample_plat(dst), ddir, bs,
                                                elapsed, offset);
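
The final hunk corrects a size rather than a cast: the histogram entries are 64-bit values, so multiplying by sizeof(unsigned int) copies only half the array on the usual targets where unsigned int is 32 bits wide. A standalone sketch of the difference (NR is a stand-in for FIO_IO_U_PLAT_NR):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR 4	/* stand-in for FIO_IO_U_PLAT_NR */

int main(void)
{
	uint64_t src[NR] = { 1, 2, 3, 4 };
	uint64_t dst[NR] = { 0 };

	/* Old size: with a 4-byte unsigned int this is 16 bytes, only half
	 * of the 32 bytes the uint64_t array occupies, so dst[3] stays 0. */
	memcpy(dst, src, NR * sizeof(unsigned int));
	printf("short copy: dst[3]=%llu\n", (unsigned long long) dst[3]);

	/* New size matches the element type; sizeof(dst) would also work. */
	memcpy(dst, src, NR * sizeof(uint64_t));
	printf("full copy:  dst[3]=%llu\n", (unsigned long long) dst[3]);
	return 0;
}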