Merge branch 'fgp_fixes' of https://github.com/sitsofe/fio
author     Jens Axboe <axboe@kernel.dk>
           Wed, 11 Oct 2017 14:26:28 +0000 (08:26 -0600)
committer  Jens Axboe <axboe@kernel.dk>
           Wed, 11 Oct 2017 14:26:28 +0000 (08:26 -0600)
19 files changed:
HOWTO
Makefile
appveyor.yml
backend.c
blktrace.c
configure
engines/filecreate.c [new file with mode: 0644]
engines/windowsaio.c
examples/filecreate-ioengine.fio [new file with mode: 0644]
filesetup.c
fio.1
fio_time.h
gettime.c
io_u.c
ioengines.h
options.c
stat.c
stat.h
tools/fio_jsonplus_clat2csv

diff --git a/HOWTO b/HOWTO
index 8fad2ce6f4d889e8de2013391ee1dad00787681d..d3f957bf4db7ecdeab63bfc5f38bbbd8150881be 100644 (file)
--- a/HOWTO
+++ b/HOWTO
@@ -217,6 +217,9 @@ Command line options
 .. option:: --max-jobs=nr
 
        Set the maximum number of threads/processes to support to `nr`.
+       NOTE: On Linux, it may be necessary to increase the shared-memory
+       limit ('/proc/sys/kernel/shmmax') if fio runs into errors while
+       creating jobs.
 
 .. option:: --server=args
 
@@ -1797,6 +1800,10 @@ I/O engine
                        absolute or relative. See :file:`engines/skeleton_external.c` for
                        details of writing an external I/O engine.
 
+               **filecreate**
+                       Simply create the files and do no IO to them.  You still need to
+                       set `filesize` so that all the accounting still occurs, but no
+                       actual IO will be done other than creating the file.
 
 I/O engine specific parameters
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
index 3764da55085102d8d67a20d533e67cafb91ac295..76243ffb056e5cebf82939d62c66678bc476f0dc 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -42,7 +42,7 @@ SOURCE :=     $(sort $(patsubst $(SRCDIR)/%,%,$(wildcard $(SRCDIR)/crc/*.c)) \
                eta.c verify.c memory.c io_u.c parse.c mutex.c options.c \
                smalloc.c filehash.c profile.c debug.c engines/cpu.c \
                engines/mmap.c engines/sync.c engines/null.c engines/net.c \
-               engines/ftruncate.c \
+               engines/ftruncate.c engines/filecreate.c \
                server.c client.c iolog.c backend.c libfio.c flow.c cconv.c \
                gettime-thread.c helpers.c json.c idletime.c td_error.c \
                profiles/tiobench.c profiles/act.c io_u_queue.c filelock.c \
index 39f50a80cf169b9fbc5da5c4e0601cdeea35becb..844afa59227380256a94cd287ecf0df61469a153 100644 (file)
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,16 +1,21 @@
-clone_depth: 50
+clone_depth: 1
 environment:
+  CYG_MIRROR: http://cygwin.mirror.constant.com
+  CYG_ROOT: C:\cygwin64
   MAKEFLAGS: -j 2
   matrix:
     - platform: x86_64
       BUILD_ARCH: x64
-      CYG_ROOT: C:\cygwin64
+      PACKAGE_ARCH: x86_64
       CONFIGURE_OPTIONS:
     - platform: x86
       BUILD_ARCH: x86
-      CYG_ROOT: C:\cygwin
+      PACKAGE_ARCH: i686
       CONFIGURE_OPTIONS: --build-32bit-win
 
+install:
+  - '%CYG_ROOT%\setup-x86_64.exe --quiet-mode --no-shortcuts --only-site --site "%CYG_MIRROR%" --packages "mingw64-%PACKAGE_ARCH%-zlib" > NULL'
+
 build_script:
   - SET PATH=%CYG_ROOT%\bin;%PATH%
  - 'bash.exe -lc "cd \"${APPVEYOR_BUILD_FOLDER}\" && ./configure --extra-cflags=\"-Werror\" ${CONFIGURE_OPTIONS} && make.exe"'
index ba6f58540b1c8135d5e3cf957e8d96b520c49ac8..d98e5fe4e8f8040b41a14a153ed40085756c7181 100644 (file)
--- a/backend.c
+++ b/backend.c
@@ -1929,11 +1929,7 @@ static void reap_threads(unsigned int *nr_running, uint64_t *t_rate,
        for_each_td(td, i) {
                int flags = 0;
 
-               /*
-                * ->io_ops is NULL for a thread that has closed its
-                * io engine
-                */
-               if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
+                if (!strcmp(td->o.ioengine, "cpuio"))
                        cputhreads++;
                else
                        realthreads++;
index 65b600f5cfed020c2008e3c77511829fcd4cf02b..4b791d7eb3f1f0b9b23432840ffcb48ef252ed16 100644 (file)
--- a/blktrace.c
+++ b/blktrace.c
@@ -500,10 +500,8 @@ int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
                handle_trace(td, &t, ios, rw_bs);
        } while (1);
 
-       for (i = 0; i < td->files_index; i++) {
-               f = td->files[i];
+       for_each_file(td, f, i)
                trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
-       }
 
        fifo_free(fifo);
        close(fd);
index cefd61032284ddc36013a7d7ab1d8aaa9bc71b71..749cb1fba35697399480ed9fad4344312453f944 100755 (executable)
--- a/configure
+++ b/configure
@@ -225,7 +225,20 @@ if test "$show_help" = "yes" ; then
 fi
 
 cross_prefix=${cross_prefix-${CROSS_COMPILE}}
-cc="${CC-${cross_prefix}gcc}"
+# Preferred compiler (can be overridden later after we know the platform):
+#  ${CC} (if set)
+#  ${cross_prefix}gcc (if cross-prefix specified)
+#  gcc if available
+#  clang if available
+if test -z "${CC}${cross_prefix}"; then
+  if has gcc; then
+    cc=gcc
+  elif has clang; then
+    cc=clang
+  fi
+else
+  cc="${CC-${cross_prefix}gcc}"
+fi
 
 if check_define __ANDROID__ ; then
   targetos="Android"
@@ -301,16 +314,16 @@ SunOS)
 CYGWIN*)
   # We still force some options, so keep this message here.
   echo "Forcing some known good options on Windows"
-  if test -z "$CC" ; then
+  if test -z "${CC}${cross_prefix}"; then
     if test ! -z "$build_32bit_win" && test "$build_32bit_win" = "yes"; then
-      CC="i686-w64-mingw32-gcc"
+      cc="i686-w64-mingw32-gcc"
       if test -e "../zlib/contrib/vstudio/vc14/x86/ZlibStatReleaseWithoutAsm/zlibstat.lib"; then
         echo "Building with zlib support"
         output_sym "CONFIG_ZLIB"
         echo "LIBS=../zlib/contrib/vstudio/vc14/x86/ZlibStatReleaseWithoutAsm/zlibstat.lib" >> $config_host_mak
       fi
     else
-      CC="x86_64-w64-mingw32-gcc"
+      cc="x86_64-w64-mingw32-gcc"
       if test -e "../zlib/contrib/vstudio/vc14/x64/ZlibStatReleaseWithoutAsm/zlibstat.lib"; then
         echo "Building with zlib support"
         output_sym "CONFIG_ZLIB"
@@ -340,11 +353,24 @@ CYGWIN*)
   tls_thread="yes"
   static_assert="yes"
   ipv6="yes"
-  echo "CC=$CC" >> $config_host_mak
   echo "BUILD_CFLAGS=$CFLAGS -I../zlib -include config-host.h -D_GNU_SOURCE" >> $config_host_mak
   ;;
 esac
 
+# Now that we know the target platform, we can have another guess at the
+# preferred compiler when it wasn't explicitly set
+if test -z "${CC}${cross_prefix}"; then
+  if test "$targetos" = "FreeBSD" || test "$targetos" = "Darwin"; then
+    if has clang; then
+      cc=clang
+    fi
+  fi
+fi
+if test -z "$cc"; then
+    echo "configure: failed to find compiler"
+    exit 1
+fi
+
 if test ! -z "$cpu" ; then
   # command line argument
   :
@@ -415,18 +441,6 @@ case "$cpu" in
   ;;
 esac
 
-if test -z "$CC" ; then
-  if test "$targetos" = "FreeBSD"; then
-    if has clang; then
-      CC=clang
-    else
-      CC=gcc
-    fi
-  fi
-fi
-
-cc="${CC-${cross_prefix}gcc}"
-
 ##########################################
 # check cross compile
 
diff --git a/engines/filecreate.c b/engines/filecreate.c
new file mode 100644 (file)
index 0000000..0c3bcdd
--- /dev/null
+++ b/engines/filecreate.c
@@ -0,0 +1,119 @@
+/*
+ * filecreate engine
+ *
+ * IO engine that doesn't do any IO, just creates files and tracks the latency
+ * of the file creation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include "../fio.h"
+#include "../filehash.h"
+
+struct fc_data {
+       enum fio_ddir stat_ddir;
+};
+
+static int open_file(struct thread_data *td, struct fio_file *f)
+{
+       struct timespec start;
+       int do_lat = !td->o.disable_lat;
+
+       dprint(FD_FILE, "fd open %s\n", f->file_name);
+
+       if (f->filetype != FIO_TYPE_FILE) {
+               log_err("fio: only files are supported\n");
+               return 1;
+       }
+       if (!strcmp(f->file_name, "-")) {
+               log_err("fio: can't read/write to stdin/out\n");
+               return 1;
+       }
+
+       if (do_lat)
+               fio_gettime(&start, NULL);
+
+       f->fd = open(f->file_name, O_CREAT|O_RDWR, 0600);
+
+       if (f->fd == -1) {
+               char buf[FIO_VERROR_SIZE];
+               int e = errno;
+
+               snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
+               td_verror(td, e, buf);
+               return 1;
+       }
+
+       if (do_lat) {
+               struct fc_data *data = td->io_ops_data;
+               uint64_t nsec;
+
+               nsec = ntime_since_now(&start);
+               add_clat_sample(td, data->stat_ddir, nsec, 0, 0);
+       }
+
+       return 0;
+}
+
+static int queue_io(struct thread_data *td, struct io_u fio_unused *io_u)
+{
+       return FIO_Q_COMPLETED;
+}
+
+/*
+ * Ensure that we at least have a block size worth of IO to do for each
+ * file. If the job file has td->o.size < nr_files * block_size, then
+ * fio won't do anything.
+ */
+static int get_file_size(struct thread_data *td, struct fio_file *f)
+{
+       f->real_file_size = td_min_bs(td);
+       return 0;
+}
+
+static int init(struct thread_data *td)
+{
+       struct fc_data *data;
+
+       data = calloc(1, sizeof(*data));
+
+       if (td_read(td))
+               data->stat_ddir = DDIR_READ;
+       else if (td_write(td))
+               data->stat_ddir = DDIR_WRITE;
+
+       td->io_ops_data = data;
+       return 0;
+}
+
+static void cleanup(struct thread_data *td)
+{
+       struct fc_data *data = td->io_ops_data;
+
+       free(data);
+}
+
+static struct ioengine_ops ioengine = {
+       .name           = "filecreate",
+       .version        = FIO_IOOPS_VERSION,
+       .init           = init,
+       .cleanup        = cleanup,
+       .queue          = queue_io,
+       .get_file_size  = get_file_size,
+       .open_file      = open_file,
+       .close_file     = generic_close_file,
+       .flags          = FIO_DISKLESSIO | FIO_SYNCIO | FIO_FAKEIO |
+                               FIO_NOSTATS | FIO_NOFILEHASH,
+};
+
+static void fio_init fio_filecreate_register(void)
+{
+       register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_filecreate_unregister(void)
+{
+       unregister_ioengine(&ioengine);
+}
index 314eaadf480c485a00f4753b2cd53206b5964211..a66b1df4ee1162e704d0360e5acba8de1c22bd27 100644 (file)
--- a/engines/windowsaio.c
+++ b/engines/windowsaio.c
@@ -142,6 +142,44 @@ static void fio_windowsaio_cleanup(struct thread_data *td)
        }
 }
 
+static int windowsaio_invalidate_cache(struct fio_file *f)
+{
+       DWORD error;
+       DWORD isharemode = (FILE_SHARE_DELETE | FILE_SHARE_READ |
+                       FILE_SHARE_WRITE);
+       HANDLE ihFile;
+       int rc = 0;
+
+       /*
+        * Encourage Windows to drop cached parts of a file by temporarily
+        * opening it for non-buffered access. Note: this will only work when
+        * the following is the only thing with the file open on the whole
+        * system.
+        */
+       dprint(FD_IO, "windowaio: attempt invalidate cache for %s\n",
+                       f->file_name);
+       ihFile = CreateFile(f->file_name, 0, isharemode, NULL, OPEN_EXISTING,
+                       FILE_FLAG_NO_BUFFERING, NULL);
+
+       if (ihFile != INVALID_HANDLE_VALUE) {
+               if (!CloseHandle(ihFile)) {
+                       error = GetLastError();
+                       log_info("windowsaio: invalidation fd close %s "
+                                "failed: error %d\n", f->file_name, error);
+                       rc = 1;
+               }
+       } else {
+               error = GetLastError();
+               if (error != ERROR_FILE_NOT_FOUND) {
+                       log_info("windowsaio: cache invalidation of %s failed: "
+                                       "error %d\n", f->file_name, error);
+                       rc = 1;
+               }
+       }
+
+       return rc;
+}
+
 static int fio_windowsaio_open_file(struct thread_data *td, struct fio_file *f)
 {
        int rc = 0;
@@ -200,6 +238,11 @@ static int fio_windowsaio_open_file(struct thread_data *td, struct fio_file *f)
        else
                openmode = OPEN_EXISTING;
 
+       /* If we're going to use direct I/O, Windows will try to invalidate
+        * its cache at that point, so there's no need to do it here */
+       if (td->o.invalidate_cache && !td->o.odirect)
+               windowsaio_invalidate_cache(f);
+
        f->hFile = CreateFile(f->file_name, access, sharemode,
                NULL, openmode, flags, NULL);
 
diff --git a/examples/filecreate-ioengine.fio b/examples/filecreate-ioengine.fio
new file mode 100644 (file)
index 0000000..ec7caad
--- /dev/null
+++ b/examples/filecreate-ioengine.fio
@@ -0,0 +1,35 @@
+# Example filecreate job
+#
+# create_on_open is needed so that the open happens during the run and not the
+# setup.
+#
+# openfiles needs to be set so that you do not exceed the maximum allowed open
+# files.
+#
+# filesize needs to be set to a non-zero value so fio will actually run, but the
+# IO will not really be done and the write latency numbers will only reflect the
+# open times.
+[global]
+create_on_open=1
+nrfiles=31250
+ioengine=filecreate
+fallocate=none
+filesize=4k
+openfiles=1
+
+[t0]
+[t1]
+[t2]
+[t3]
+[t4]
+[t5]
+[t6]
+[t7]
+[t8]
+[t9]
+[t10]
+[t11]
+[t12]
+[t13]
+[t14]
+[t15]
index 891a55a1ddb97ab30c7cc375cd9246fed21addd8..0631a01f96a635789f925e6f54f7abf9a23ce92f 100644 (file)
--- a/filesetup.c
+++ b/filesetup.c
@@ -1342,6 +1342,7 @@ void close_and_free_files(struct thread_data *td)
 {
        struct fio_file *f;
        unsigned int i;
+       bool use_free = td_ioengine_flagged(td, FIO_NOFILEHASH);
 
        dprint(FD_FILE, "close files\n");
 
@@ -1361,13 +1362,19 @@ void close_and_free_files(struct thread_data *td)
                        td_io_unlink_file(td, f);
                }
 
-               sfree(f->file_name);
+               if (use_free)
+                       free(f->file_name);
+               else
+                       sfree(f->file_name);
                f->file_name = NULL;
                if (fio_file_axmap(f)) {
                        axmap_free(f->io_axmap);
                        f->io_axmap = NULL;
                }
-               sfree(f);
+               if (use_free)
+                       free(f);
+               else
+                       sfree(f);
        }
 
        td->o.filename = NULL;
@@ -1481,7 +1488,10 @@ static struct fio_file *alloc_new_file(struct thread_data *td)
 {
        struct fio_file *f;
 
-       f = smalloc(sizeof(*f));
+       if (td_ioengine_flagged(td, FIO_NOFILEHASH))
+               f = calloc(1, sizeof(*f));
+       else
+               f = smalloc(sizeof(*f));
        if (!f) {
                assert(0);
                return NULL;
@@ -1564,7 +1574,10 @@ int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
        if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
                f->real_file_size = -1ULL;
 
-       f->file_name = smalloc_strdup(file_name);
+       if (td_ioengine_flagged(td, FIO_NOFILEHASH))
+               f->file_name = strdup(file_name);
+       else
+               f->file_name = smalloc_strdup(file_name);
        if (!f->file_name)
                assert(0);
 
@@ -1588,7 +1601,8 @@ int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
        if (f->filetype == FIO_TYPE_FILE)
                td->nr_normal_files++;
 
-       set_already_allocated(file_name);
+       if (td->o.numjobs > 1)
+               set_already_allocated(file_name);
 
        if (inc)
                td->o.nr_files++;
@@ -1768,7 +1782,10 @@ void dup_files(struct thread_data *td, struct thread_data *org)
                __f = alloc_new_file(td);
 
                if (f->file_name) {
-                       __f->file_name = smalloc_strdup(f->file_name);
+                       if (td_ioengine_flagged(td, FIO_NOFILEHASH))
+                               __f->file_name = strdup(f->file_name);
+                       else
+                               __f->file_name = smalloc_strdup(f->file_name);
                        if (!__f->file_name)
                                assert(0);
 
diff --git a/fio.1 b/fio.1
index b943db2289d66c87ced5bd3b7112cf9b580db8ab..6e7d1f8b1e4647e3e87912304a67c5e51101222b 100644 (file)
--- a/fio.1
+++ b/fio.1
@@ -113,6 +113,8 @@ All fio parser warnings are fatal, causing fio to exit with an error.
 .TP
 .BI \-\-max\-jobs \fR=\fPnr
 Set the maximum number of threads/processes to support to \fInr\fR.
+NOTE: On Linux, it may be necessary to increase the shared-memory limit
+(`/proc/sys/kernel/shmmax') if fio runs into errors while creating jobs.
 .TP
 .BI \-\-server \fR=\fPargs
 Start a backend server, with \fIargs\fR specifying what to listen to.
@@ -1577,6 +1579,10 @@ the engine filename, e.g. `ioengine=external:/tmp/foo.o' to load
 ioengine `foo.o' in `/tmp'. The path can be either
 absolute or relative. See `engines/skeleton_external.c' in the fio source for
 details of writing an external I/O engine.
+.TP
+.B filecreate
+Create empty files only.  \fBfilesize\fR still needs to be specified so that fio
+will run and grab latency results, but no IO will actually be done on the files.
 .SS "I/O engine specific parameters"
 In addition, there are some parameters which are only valid when a specific
 \fBioengine\fR is in use. These are used identically to normal parameters,
index f4eac793f4fb3311dbd77069fc5a392477f33fef..c7c3dbbad957240d5371e77967f7486b0dd028e3 100644 (file)
--- a/fio_time.h
+++ b/fio_time.h
@@ -5,6 +5,7 @@
 
 struct thread_data;
 extern uint64_t ntime_since(const struct timespec *, const struct timespec *);
+extern uint64_t ntime_since_now(const struct timespec *);
 extern uint64_t utime_since(const struct timespec *, const struct timespec *);
 extern uint64_t utime_since_now(const struct timespec *);
 extern uint64_t mtime_since(const struct timespec *, const struct timespec *);
index 3dcaaf680803fdcdb798de6009745f7821118d39..79455284a30d715b819a779bf3e7943c602eb549 100644 (file)
--- a/gettime.c
+++ b/gettime.c
@@ -448,6 +448,14 @@ uint64_t ntime_since(const struct timespec *s, const struct timespec *e)
        return nsec + (sec * 1000000000LL);
 }
 
+uint64_t ntime_since_now(const struct timespec *s)
+{
+       struct timespec now;
+
+       fio_gettime(&now, NULL);
+       return ntime_since(s, &now);
+}
+
 uint64_t utime_since(const struct timespec *s, const struct timespec *e)
 {
        int64_t sec, usec;
diff --git a/io_u.c b/io_u.c
index 58c23202bd3d20bb1eee1b4fbcf1731d9bf312ea..fb4180a3bc35f16cf6a0463b01be1ff9b9e7f347 100644 (file)
--- a/io_u.c
+++ b/io_u.c
@@ -1779,7 +1779,7 @@ static void account_io_completion(struct thread_data *td, struct io_u *io_u,
        if (td->parent)
                td = td->parent;
 
-       if (!td->o.stats)
+       if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS))
                return;
 
        if (no_reduce)
index 177cbc053c33ba8aedf7dbb092d15bdc92b58ccb..32b18edadf1f2dbbdcdf2ae66df3ea264a4a16c8 100644 (file)
--- a/ioengines.h
+++ b/ioengines.h
@@ -59,6 +59,8 @@ enum fio_ioengine_flags {
        FIO_MEMALIGN    = 1 << 9,       /* engine wants aligned memory */
        FIO_BIT_BASED   = 1 << 10,      /* engine uses a bit base (e.g. uses Kbit as opposed to KB) */
        FIO_FAKEIO      = 1 << 11,      /* engine pretends to do IO */
+       FIO_NOSTATS     = 1 << 12,      /* don't do IO stats */
+       FIO_NOFILEHASH  = 1 << 13,      /* doesn't hash the files for lookup later. */
 };
 
 /*
index 5c1abe91817dc7c3ba62fab0108fbac041f3c032..ddcc4e5adc140a90d7b26884ebcea2e3692f4eda 100644 (file)
--- a/options.c
+++ b/options.c
@@ -1843,6 +1843,10 @@ struct fio_option fio_options[FIO_MAX_OPTS] = {
                            .help = "DAX Device based IO engine",
                          },
 #endif
+                         {
+                           .ival = "filecreate",
+                           .help = "File creation engine",
+                         },
                          { .ival = "external",
                            .help = "Load external engine (append name)",
                            .cb = str_ioengine_external_cb,
diff --git a/stat.c b/stat.c
index 09afa5bdd8f5bb0fb3c771d01bae2fa1f491dc1e..c5a68ad5c489cb0baf083220edbd63c4d2bcf591 100644 (file)
--- a/stat.c
+++ b/stat.c
@@ -962,7 +962,7 @@ static void add_ddir_status_json(struct thread_stat *ts,
        unsigned int len;
        int i;
        const char *ddirname[] = {"read", "write", "trim"};
-       struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object;
+       struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object = NULL;
        char buf[120];
        double p_of_agg = 100.0;
 
@@ -1036,7 +1036,9 @@ static void add_ddir_status_json(struct thread_stat *ts,
 
        if (output_format & FIO_OUTPUT_JSON_PLUS) {
                clat_bins_object = json_create_object();
-               json_object_add_value_object(tmp_object, "bins", clat_bins_object);
+               if (ts->clat_percentiles)
+                       json_object_add_value_object(tmp_object, "bins", clat_bins_object);
+
                for(i = 0; i < FIO_IO_U_PLAT_NR; i++) {
                        if (ts->io_u_plat[ddir][i]) {
                                snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
@@ -1055,6 +1057,9 @@ static void add_ddir_status_json(struct thread_stat *ts,
        json_object_add_value_int(tmp_object, "max", max);
        json_object_add_value_float(tmp_object, "mean", mean);
        json_object_add_value_float(tmp_object, "stddev", dev);
+       if (output_format & FIO_OUTPUT_JSON_PLUS && ts->lat_percentiles)
+               json_object_add_value_object(tmp_object, "bins", clat_bins_object);
+
        if (ovals)
                free(ovals);
 
diff --git a/stat.h b/stat.h
index 848331bb5e47fef2438cf19a910fed5c02b712bc..3fda084156bbfb984d34985e686a33ad3bb44082 100644 (file)
--- a/stat.h
+++ b/stat.h
@@ -23,6 +23,16 @@ struct group_run_stats {
 #define FIO_IO_U_LAT_U_NR 10
 #define FIO_IO_U_LAT_M_NR 12
 
+/*
+ * Constants for clat percentiles
+ */
+#define FIO_IO_U_PLAT_BITS 6
+#define FIO_IO_U_PLAT_VAL (1 << FIO_IO_U_PLAT_BITS)
+#define FIO_IO_U_PLAT_GROUP_NR 29
+#define FIO_IO_U_PLAT_NR (FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
+#define FIO_IO_U_LIST_MAX_LEN 20 /* The size of the default and user-specified
+                                       list of percentiles */
+
 /*
  * Aggregate clat samples to report percentile(s) of them.
  *
@@ -34,7 +44,7 @@ struct group_run_stats {
  *
  * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the maximum
  * range being tracked for latency samples. The maximum value tracked
- * accurately will be 2^(GROUP_NR + PLAT_BITS -1) microseconds.
+ * accurately will be 2^(GROUP_NR + PLAT_BITS - 1) nanoseconds.
  *
  * FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the memory
  * requirement of storing those aggregate counts. The memory used will
@@ -98,22 +108,15 @@ struct group_run_stats {
  *     3       8       2               [256,511]               64
  *     4       9       3               [512,1023]              64
  *     ...     ...     ...             [...,...]               ...
- *     18      23      17              [8838608,+inf]**        64
+ *     28      33      27              [8589934592,+inf]**     64
  *
  *  * Special cases: when n < (M-1) or when n == (M-1), in both cases,
  *    the value cannot be rounded off. Use all bits of the sample as
  *    index.
  *
- *  ** If a sample's MSB is greater than 23, it will be counted as 23.
+ *  ** If a sample's MSB is greater than 33, it will be counted as 33.
  */
 
-#define FIO_IO_U_PLAT_BITS 6
-#define FIO_IO_U_PLAT_VAL (1 << FIO_IO_U_PLAT_BITS)
-#define FIO_IO_U_PLAT_GROUP_NR 29
-#define FIO_IO_U_PLAT_NR (FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
-#define FIO_IO_U_LIST_MAX_LEN 20 /* The size of the default and user-specified
-                                       list of percentiles */
-
 /*
  * Trim cycle count measurements
  */
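
The histogram layout described in the stat.h comment above maps every nanosecond
latency sample to one of FIO_IO_U_PLAT_NR (29 * 64 = 1856) buckets; with these
constants the largest accurately tracked value is 2^(29 + 6 - 1) = 2^34 ns, roughly
17.2 seconds. A minimal standalone sketch of that index mapping (the helper name
and exact guard handling are illustrative, not fio's internal code):

#include <stdint.h>

#define PLAT_BITS     6                            /* FIO_IO_U_PLAT_BITS */
#define PLAT_VAL      (1 << PLAT_BITS)             /* 64 buckets per group */
#define PLAT_GROUP_NR 29                           /* FIO_IO_U_PLAT_GROUP_NR */
#define PLAT_NR       (PLAT_GROUP_NR * PLAT_VAL)   /* 1856 buckets in total */

/* Map a latency sample in nanoseconds to its histogram bucket index. */
static unsigned int sample_to_bucket(uint64_t nsec)
{
	unsigned int msb, error_bits, base, offset, idx;

	/* Most significant set bit of the sample (0 for a zero sample). */
	msb = nsec ? 63 - __builtin_clzll(nsec) : 0;

	/* Small samples cannot be rounded off: use the value as the index. */
	if (msb <= PLAT_BITS)
		return (unsigned int) nsec;

	/* Low-order bits below the bucket resolution are discarded, which
	 * costs at most 1/64 (about 1.6%) of the sample value. */
	error_bits = msb - PLAT_BITS;

	base = (error_bits + 1) << PLAT_BITS;           /* buckets before this group */
	offset = (nsec >> error_bits) & (PLAT_VAL - 1); /* slot inside the group */

	/* Samples past the tracked range land in the final bucket (MSB 33). */
	idx = base + offset;
	return idx < PLAT_NR ? idx : PLAT_NR - 1;
}

Each group doubles the range it covers while keeping 64 slots, so the memory cost
stays fixed at FIO_IO_U_PLAT_NR counters per data direction.
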
index d4ac16e422f6a6df3f61c3af3ebcb9d8a4edc8fe..64fdc9f3a00b187605c5c02db12391c4bf1d5291 100755 (executable)
--- a/tools/fio_jsonplus_clat2csv
+++ b/tools/fio_jsonplus_clat2csv
@@ -107,8 +107,16 @@ def main():
 
         prev_ddir = None
         for ddir in ddir_set:
+            if 'bins' in jsondata['jobs'][jobnum][ddir]['clat_ns']:
+                bins_loc = 'clat_ns'
+            elif 'bins' in jsondata['jobs'][jobnum][ddir]['lat_ns']:
+                bins_loc = 'lat_ns'
+            else:
+                raise RuntimeError("Latency bins not found. "
+                                   "Are you sure you are using json+ output?")
+
             bins[ddir] = [[int(key), value] for key, value in
-                          jsondata['jobs'][jobnum][ddir]['clat_ns']
+                          jsondata['jobs'][jobnum][ddir][bins_loc]
                           ['bins'].iteritems()]
             bins[ddir] = sorted(bins[ddir], key=lambda bin: bin[0])
 
@@ -123,7 +131,7 @@ def main():
         outfile = stub + '_job' + str(jobnum) + ext
 
         with open(outfile, 'w') as output:
-            output.write("clat_nsec, ")
+            output.write("{0}ec, ".format(bins_loc))
             ddir_list = list(ddir_set)
             for ddir in ddir_list:
                 output.write("{0}_count, {0}_cumulative, {0}_percentile, ".