Add a simple JSON encoder and use it to print fio output in JSON format
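
For illustration, a minimal sketch of how the new encoder is driven, using only the json_create_*() and json_*_add_value_*() calls that appear in the patch below. The wrapper function name, the hard-coded sample values, and the json_print_object()/json_free_object() finalization helpers are assumptions for the sketch, not part of this change:

    #include "json.h"

    /*
     * Build the same shape of output that show_disk_util() produces when
     * terse_version == 4: a "disk_util" array holding one object per disk.
     */
    static void json_encoder_example(void)
    {
            struct json_object *root = json_create_object();
            struct json_array *array = json_create_array();
            struct json_object *disk = json_create_object();

            json_object_add_value_array(root, "disk_util", array);
            json_array_add_value_object(array, disk);

            json_object_add_value_string(disk, "name", "sda");
            json_object_add_value_int(disk, "read_ios", 1234);
            json_object_add_value_int(disk, "write_ios", 5678);
            json_object_add_value_float(disk, "util", 97.53);

            json_print_object(root);        /* assumed: dump the tree via log output */
            json_free_object(root);         /* assumed: free root and everything below */
    }
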
[fio.git] / diskutil.c
index 4ddaa33355bdd7a4093ec2aaa056f94617c228ed..d2c0b976f1c89208cf30cc99feed81e438a03b8c 100644 (file)
@@ -14,6 +14,9 @@
 static int last_majdev, last_mindev;
 static struct disk_util *last_du;
 
+static struct fio_mutex *disk_util_mutex;
+static int disk_util_exit;
+
 FLIST_HEAD(disk_list);
 
 static struct disk_util *__init_per_file_disk_util(struct thread_data *td,
@@ -31,7 +34,7 @@ static void disk_util_free(struct disk_util *du)
                flist_del(&slave->slavelist);
                slave->users--;
        }
-       
+
        fio_mutex_remove(du->lock);
        sfree(du);
 }
@@ -102,17 +105,26 @@ static void update_io_tick_disk(struct disk_util *du)
        memcpy(ldus, &__dus, sizeof(__dus));
 }
 
-void update_io_ticks(void)
+int update_io_ticks(void)
 {
        struct flist_head *entry;
        struct disk_util *du;
+       int ret = 0;
 
        dprint(FD_DISKUTIL, "update io ticks\n");
 
-       flist_for_each(entry, &disk_list) {
-               du = flist_entry(entry, struct disk_util, list);
-               update_io_tick_disk(du);
-       }
+       fio_mutex_down(disk_util_mutex);
+
+       if (!disk_util_exit) {
+               flist_for_each(entry, &disk_list) {
+                       du = flist_entry(entry, struct disk_util, list);
+                       update_io_tick_disk(du);
+               }
+       } else
+               ret = 1;
+
+       fio_mutex_up(disk_util_mutex);
+       return ret;
 }
 
 static struct disk_util *disk_util_exists(int major, int minor)
@@ -120,13 +132,18 @@ static struct disk_util *disk_util_exists(int major, int minor)
        struct flist_head *entry;
        struct disk_util *du;
 
+       fio_mutex_down(disk_util_mutex);
+
        flist_for_each(entry, &disk_list) {
                du = flist_entry(entry, struct disk_util, list);
 
-               if (major == du->major && minor == du->minor)
+               if (major == du->major && minor == du->minor) {
+                       fio_mutex_up(disk_util_mutex);
                        return du;
+               }
        }
 
+       fio_mutex_up(disk_util_mutex);
        return NULL;
 }
 
@@ -215,7 +232,7 @@ static void find_add_disk_slaves(struct thread_data *td, char *path,
                    !strcmp(dirent->d_name, ".."))
                        continue;
 
-               sprintf(temppath, "%s/%s", slavesdir, dirent->d_name);
+               sprintf(temppath, "%s%s%s", slavesdir, FIO_OS_PATH_SEPARATOR, dirent->d_name);
                /* Can we always assume that the slaves device entries
                 * are links to the real directories for the slave
                 * devices?
@@ -240,7 +257,7 @@ static void find_add_disk_slaves(struct thread_data *td, char *path,
                if (slavedu)
                        continue;
 
-               sprintf(temppath, "%s/%s", slavesdir, slavepath);
+               sprintf(temppath, "%s%s%s", slavesdir, FIO_OS_PATH_SEPARATOR, slavepath);
                __init_per_file_disk_util(td, majdev, mindev, temppath);
                slavedu = disk_util_exists(majdev, mindev);
 
@@ -273,9 +290,11 @@ static struct disk_util *disk_util_add(struct thread_data *td, int majdev,
        du->minor = mindev;
        INIT_FLIST_HEAD(&du->slavelist);
        INIT_FLIST_HEAD(&du->slaves);
-       du->lock = fio_mutex_init(1);
+       du->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
        du->users = 0;
 
+       fio_mutex_down(disk_util_mutex);
+
        flist_for_each(entry, &disk_list) {
                __du = flist_entry(entry, struct disk_util, list);
 
@@ -283,6 +302,7 @@ static struct disk_util *disk_util_add(struct thread_data *td, int majdev,
 
                if (!strcmp((char *) du->dus.name, (char *) __du->dus.name)) {
                        disk_util_free(du);
+                       fio_mutex_up(disk_util_mutex);
                        return __du;
                }
        }
@@ -293,6 +313,8 @@ static struct disk_util *disk_util_add(struct thread_data *td, int majdev,
        get_io_ticks(du, &du->last_dus);
 
        flist_add_tail(&du->list, &disk_list);
+       fio_mutex_up(disk_util_mutex);
+
        find_add_disk_slaves(td, path, du);
        return du;
 }
@@ -327,7 +349,7 @@ static int find_block_dir(int majdev, int mindev, char *path, int link_ok)
                if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
                        continue;
 
-               sprintf(full_path, "%s/%s", path, dir->d_name);
+               sprintf(full_path, "%s%s%s", path, FIO_OS_PATH_SEPARATOR, dir->d_name);
 
                if (!strcmp(dir->d_name, "dev")) {
                        if (!check_dev_match(majdev, mindev, full_path)) {
@@ -470,7 +492,7 @@ static void show_agg_stats(struct disk_util_agg *agg, int terse)
                                agg->time_in_queue / agg->slavecount,
                                agg->max_util.u.f);
        } else {
-               log_info("slaves;%u;%u;%u;%u;%u;%u;%u;%3.2f%%",
+               log_info(";slaves;%u;%u;%u;%u;%u;%u;%u;%3.2f%%",
                                agg->ios[0] / agg->slavecount,
                                agg->ios[1] / agg->slavecount,
                                agg->merges[0] / agg->slavecount,
@@ -521,6 +543,11 @@ void free_disk_util(void)
 {
        struct disk_util *du;
 
+       disk_util_exit = 1;
+       wait_for_disk_thread_exit();
+
+       fio_mutex_down(disk_util_mutex);
+
        while (!flist_empty(&disk_list)) {
                du = flist_entry(disk_list.next, struct disk_util, list);
                flist_del(&du->list);
@@ -528,6 +555,8 @@ void free_disk_util(void)
        }
 
        last_majdev = last_mindev = -1;
+       fio_mutex_up(disk_util_mutex);
+       fio_mutex_remove(disk_util_mutex);
 }
 
 void print_disk_util(struct disk_util_stat *dus, struct disk_util_agg *agg,
@@ -540,10 +569,10 @@ void print_disk_util(struct disk_util_stat *dus, struct disk_util_agg *agg,
        if (util > 100.0)
                util = 100.0;
 
-       if (agg->slavecount)
-               log_info("  ");
-
        if (!terse) {
+               if (agg->slavecount)
+                       log_info("  ");
+
                log_info("  %s: ios=%u/%u, merge=%u/%u, ticks=%u/%u, "
                         "in_queue=%u, util=%3.2f%%", dus->name,
                                        dus->ios[0], dus->ios[1],
@@ -562,28 +591,96 @@ void print_disk_util(struct disk_util_stat *dus, struct disk_util_agg *agg,
         * If the device has slaves, aggregate the stats for
         * those slave devices also.
         */
-       if (agg->slavecount)
-               show_agg_stats(agg, terse);
+       show_agg_stats(agg, terse);
 
        if (!terse)
                log_info("\n");
 }
 
-void show_disk_util(int terse)
+static void print_disk_util_json(struct disk_util *du, struct json_array *array)
+{
+       double util = 0;
+       struct disk_util_stat *dus = &du->dus;
+       struct disk_util_agg *agg = &du->agg;
+       struct json_object *obj;
+
+       obj = json_create_object();
+       json_array_add_value_object(array, obj);
+
+       if (dus->msec)
+               util = (double) 100 * dus->io_ticks / (double) dus->msec;
+       if (util > 100.0)
+               util = 100.0;
+
+       json_object_add_value_string(obj, "name", dus->name);
+       json_object_add_value_int(obj, "read_ios", dus->ios[0]);
+       json_object_add_value_int(obj, "write_ios", dus->ios[1]);
+       json_object_add_value_int(obj, "read_merges", dus->merges[0]);
+       json_object_add_value_int(obj, "write_merges", dus->merges[1]);
+       json_object_add_value_int(obj, "read_ticks", dus->ticks[0]);
+       json_object_add_value_int(obj, "write_ticks", dus->ticks[1]);
+       json_object_add_value_int(obj, "in_queue", dus->time_in_queue);
+       json_object_add_value_float(obj, "util", util);
+
+       /*
+        * If the device has slaves, aggregate the stats for
+        * those slave devices also.
+        */
+       if (!agg->slavecount)
+               return;
+       json_object_add_value_int(obj, "aggr_read_ios",
+                               agg->ios[0] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_write_ios",
+                               agg->ios[1] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_read_merges",
+                               agg->merges[0] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_write_merge",
+                               agg->merges[1] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_read_ticks",
+                               agg->ticks[0] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_write_ticks",
+                               agg->ticks[1] / agg->slavecount);
+       json_object_add_value_int(obj, "aggr_in_queue",
+                               agg->time_in_queue / agg->slavecount);
+       json_object_add_value_float(obj, "aggr_util", agg->max_util.u.f);
+}
+
+void show_disk_util(int terse, struct json_object *parent)
 {
        struct flist_head *entry;
        struct disk_util *du;
+       struct json_array *array = NULL;
+
+       fio_mutex_down(disk_util_mutex);
 
-       if (flist_empty(&disk_list))
+       if (flist_empty(&disk_list)) {
+               fio_mutex_up(disk_util_mutex);
                return;
+       }
 
        if (!terse)
                log_info("\nDisk stats (read/write):\n");
 
+       if (terse && terse_version == 4) {
+               array = json_create_array();
+               json_object_add_value_array(parent, "disk_util", array);
+       }
+
        flist_for_each(entry, &disk_list) {
                du = flist_entry(entry, struct disk_util, list);
 
                aggregate_slaves_stats(du);
-               print_disk_util(&du->dus, &du->agg, terse);
+               if (terse && terse_version == 4)
+                       print_disk_util_json(du, array);
+               else
+                       print_disk_util(&du->dus, &du->agg, terse);
        }
+
+       fio_mutex_up(disk_util_mutex);
+}
+
+void setup_disk_util(void)
+{
+       disk_util_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED);
 }
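
For reference, a sketch of the lifecycle implied by the new entry points: setup_disk_util() creates disk_util_mutex, update_io_ticks() is polled until free_disk_util() flags disk_util_exit and waits for the helper thread, and show_disk_util() now takes a JSON parent for terse_version 4 output. The *_example function names and the 250ms polling interval are illustrative assumptions; the fio calls themselves are the ones touched by this patch:

    #include <unistd.h>
    #include "diskutil.h"
    #include "json.h"

    /*
     * Background poller: update_io_ticks() now returns nonzero once
     * free_disk_util() has set disk_util_exit, so the loop can terminate.
     */
    static void *disk_util_poller_example(void *unused)
    {
            (void) unused;

            while (!update_io_ticks())
                    usleep(250 * 1000);
            return NULL;
    }

    /*
     * Main-side lifecycle: create the list mutex up front, report either in
     * text/terse form or into a JSON tree, then tear down. free_disk_util()
     * flags the poller, waits for it via wait_for_disk_thread_exit(), and
     * releases the mutex.
     */
    static void disk_util_lifecycle_example(struct json_object *root, int terse)
    {
            setup_disk_util();
            /* ... job runs; disk_util_add() and the poller serialize on
             *     disk_util_mutex ... */
            show_disk_util(terse, root);    /* with terse && terse_version == 4,
                                               stats land in root["disk_util"] */
            free_disk_util();
    }
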