.. option:: --max-jobs=nr
Set the maximum number of threads/processes to support to `nr`.
+ NOTE: On Linux, it may be necessary to increase the shared-memory
+ limit ('/proc/sys/kernel/shmmax') if fio runs into errors while
+ creating jobs.
.. option:: --server=args
absolute or relative. See :file:`engines/skeleton_external.c` for
details of writing an external I/O engine.
+ **filecreate**
+ Simply create the files and do no IO to them. You still need to
+ set `filesize` so that all the accounting still occurs, but no
+ actual IO will be done other than creating the file.
I/O engine specific parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
eta.c verify.c memory.c io_u.c parse.c mutex.c options.c \
smalloc.c filehash.c profile.c debug.c engines/cpu.c \
engines/mmap.c engines/sync.c engines/null.c engines/net.c \
- engines/ftruncate.c \
+ engines/ftruncate.c engines/filecreate.c \
server.c client.c iolog.c backend.c libfio.c flow.c cconv.c \
gettime-thread.c helpers.c json.c idletime.c td_error.c \
profiles/tiobench.c profiles/act.c io_u_queue.c filelock.c \
-clone_depth: 50
+clone_depth: 1
environment:
+ CYG_MIRROR: http://cygwin.mirror.constant.com
+ CYG_ROOT: C:\cygwin64
MAKEFLAGS: -j 2
matrix:
- platform: x86_64
BUILD_ARCH: x64
- CYG_ROOT: C:\cygwin64
+ PACKAGE_ARCH: x86_64
CONFIGURE_OPTIONS:
- platform: x86
BUILD_ARCH: x86
- CYG_ROOT: C:\cygwin
+ PACKAGE_ARCH: i686
CONFIGURE_OPTIONS: --build-32bit-win
+install:
+ - '%CYG_ROOT%\setup-x86_64.exe --quiet-mode --no-shortcuts --only-site --site "%CYG_MIRROR%" --packages "mingw64-%PACKAGE_ARCH%-zlib" > NULL'
+
build_script:
- SET PATH=%CYG_ROOT%\bin;%PATH%
- 'bash.exe -lc "cd \"${APPVEYOR_BUILD_FOLDER}\" && ./configure --extra-cflags=\"-Werror\" ${CONFIGURE_OPTIONS} && make.exe"'
for_each_td(td, i) {
int flags = 0;
- /*
- * ->io_ops is NULL for a thread that has closed its
- * io engine
- */
- if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
+ if (!strcmp(td->o.ioengine, "cpuio"))
cputhreads++;
else
realthreads++;
handle_trace(td, &t, ios, rw_bs);
} while (1);
- for (i = 0; i < td->files_index; i++) {
- f = td->files[i];
+ for_each_file(td, f, i)
trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
- }
fifo_free(fifo);
close(fd);
fi
cross_prefix=${cross_prefix-${CROSS_COMPILE}}
-cc="${CC-${cross_prefix}gcc}"
+# Preferred compiler (can be overridden later after we know the platform):
+# ${CC} (if set)
+# ${cross_prefix}gcc (if cross-prefix specified)
+# gcc if available
+# clang if available
+if test -z "${CC}${cross_prefix}"; then
+ if has gcc; then
+ cc=gcc
+ elif has clang; then
+ cc=clang
+ fi
+else
+ cc="${CC-${cross_prefix}gcc}"
+fi
if check_define __ANDROID__ ; then
targetos="Android"
CYGWIN*)
# We still force some options, so keep this message here.
echo "Forcing some known good options on Windows"
- if test -z "$CC" ; then
+ if test -z "${CC}${cross_prefix}"; then
if test ! -z "$build_32bit_win" && test "$build_32bit_win" = "yes"; then
- CC="i686-w64-mingw32-gcc"
+ cc="i686-w64-mingw32-gcc"
if test -e "../zlib/contrib/vstudio/vc14/x86/ZlibStatReleaseWithoutAsm/zlibstat.lib"; then
echo "Building with zlib support"
output_sym "CONFIG_ZLIB"
echo "LIBS=../zlib/contrib/vstudio/vc14/x86/ZlibStatReleaseWithoutAsm/zlibstat.lib" >> $config_host_mak
fi
else
- CC="x86_64-w64-mingw32-gcc"
+ cc="x86_64-w64-mingw32-gcc"
if test -e "../zlib/contrib/vstudio/vc14/x64/ZlibStatReleaseWithoutAsm/zlibstat.lib"; then
echo "Building with zlib support"
output_sym "CONFIG_ZLIB"
tls_thread="yes"
static_assert="yes"
ipv6="yes"
- echo "CC=$CC" >> $config_host_mak
echo "BUILD_CFLAGS=$CFLAGS -I../zlib -include config-host.h -D_GNU_SOURCE" >> $config_host_mak
;;
esac
+# Now we know the target platform we can have another guess at the preferred
+# compiler when it wasn't explicitly set
+if test -z "${CC}${cross_prefix}"; then
+ if test "$targetos" = "FreeBSD" || test "$targetos" = "Darwin"; then
+ if has clang; then
+ cc=clang
+ fi
+ fi
+fi
+if test -z "$cc"; then
+ echo "configure: failed to find compiler"
+ exit 1
+fi
+
if test ! -z "$cpu" ; then
# command line argument
:
;;
esac
-if test -z "$CC" ; then
- if test "$targetos" = "FreeBSD"; then
- if has clang; then
- CC=clang
- else
- CC=gcc
- fi
- fi
-fi
-
-cc="${CC-${cross_prefix}gcc}"
-
##########################################
# check cross compile
--- /dev/null
+/*
+ * filecreate engine
+ *
+ * IO engine that doesn't do any IO, just creates files and tracks the latency
+ * of the file creation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include "../fio.h"
+#include "../filehash.h"
+
+/* Per-thread private state for the filecreate engine. */
+struct fc_data {
+	enum fio_ddir stat_ddir;	/* ddir the creation latency samples are accounted under */
+};
+
+/*
+ * Create/open the file and, unless latency reporting is disabled, record
+ * the elapsed creation time as a completion latency sample. Returns 0 on
+ * success, 1 on error (td_verror is set for open(2) failures).
+ */
+static int open_file(struct thread_data *td, struct fio_file *f)
+{
+	struct timespec start;
+	int do_lat = !td->o.disable_lat;
+
+	dprint(FD_FILE, "fd open %s\n", f->file_name);
+
+	if (f->filetype != FIO_TYPE_FILE) {
+		log_err("fio: only files are supported\n");
+		return 1;
+	}
+	if (!strcmp(f->file_name, "-")) {
+		log_err("fio: can't read/write to stdin/out\n");
+		return 1;
+	}
+
+	if (do_lat)
+		fio_gettime(&start, NULL);
+
+	f->fd = open(f->file_name, O_CREAT|O_RDWR, 0600);
+
+	if (f->fd == -1) {
+		char buf[FIO_VERROR_SIZE];
+		int e = errno;
+
+		snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
+		td_verror(td, e, buf);
+		return 1;
+	}
+
+	if (do_lat) {
+		struct fc_data *data = td->io_ops_data;
+		uint64_t nsec;
+
+		nsec = ntime_since_now(&start);
+		add_clat_sample(td, data->stat_ddir, nsec, 0, 0);
+	}
+
+	return 0;
+}
+
+/*
+ * No data IO is ever performed: every io_u completes immediately.
+ * The interesting latency (file creation) is measured in open_file().
+ */
+static int queue_io(struct thread_data *td, struct io_u fio_unused *io_u)
+{
+	return FIO_Q_COMPLETED;
+}
+
+/*
+ * Ensure that we at least have a block size worth of IO to do for each
+ * file. If the job file has td->o.size < nr_files * block_size, then
+ * fio won't do anything.
+ */
+static int get_file_size(struct thread_data *td, struct fio_file *f)
+{
+	/* Report the job's minimum block size as the file's size. */
+	f->real_file_size = td_min_bs(td);
+	return 0;
+}
+
+/*
+ * Per-thread setup: allocate the private engine data and decide which
+ * data direction the creation latencies will be accounted under.
+ * Returns 0 on success, 1 on allocation failure.
+ */
+static int init(struct thread_data *td)
+{
+	struct fc_data *data;
+
+	data = calloc(1, sizeof(*data));
+	if (!data) {
+		log_err("fio: filecreate: out of memory\n");
+		return 1;
+	}
+
+	if (td_read(td))
+		data->stat_ddir = DDIR_READ;
+	else if (td_write(td))
+		data->stat_ddir = DDIR_WRITE;
+
+	td->io_ops_data = data;
+	return 0;
+}
+
+/* Per-thread teardown: release the private data allocated in init(). */
+static void cleanup(struct thread_data *td)
+{
+	struct fc_data *data = td->io_ops_data;
+
+	free(data);
+}
+
+/*
+ * Engine descriptor. FIO_DISKLESSIO/FIO_FAKEIO: no real data IO is done;
+ * FIO_NOSTATS: skip normal IO statistics; FIO_NOFILEHASH: files are not
+ * entered into the shared file hash.
+ */
+static struct ioengine_ops ioengine = {
+	.name		= "filecreate",
+	.version	= FIO_IOOPS_VERSION,
+	.init		= init,
+	.cleanup	= cleanup,
+	.queue		= queue_io,
+	.get_file_size	= get_file_size,
+	.open_file	= open_file,
+	.close_file	= generic_close_file,
+	.flags		= FIO_DISKLESSIO | FIO_SYNCIO | FIO_FAKEIO |
+				FIO_NOSTATS | FIO_NOFILEHASH,
+};
+
+/* Register the engine when the fio binary (or plugin) is loaded. */
+static void fio_init fio_filecreate_register(void)
+{
+	register_ioengine(&ioengine);
+}
+
+/* Unregister the engine again at unload time. */
+static void fio_exit fio_filecreate_unregister(void)
+{
+	unregister_ioengine(&ioengine);
+}
}
}
+/*
+ * Best-effort cache invalidation. Returns 0 on success, 1 when the
+ * invalidation open/close failed for a reason other than the file not
+ * existing yet (a missing file simply has nothing cached to drop).
+ */
+static int windowsaio_invalidate_cache(struct fio_file *f)
+{
+	DWORD error;
+	DWORD isharemode = (FILE_SHARE_DELETE | FILE_SHARE_READ |
+			FILE_SHARE_WRITE);
+	HANDLE ihFile;
+	int rc = 0;
+
+	/*
+	 * Encourage Windows to drop cached parts of a file by temporarily
+	 * opening it for non-buffered access. Note: this will only work when
+	 * the following is the only thing with the file open on the whole
+	 * system.
+	 */
+	dprint(FD_IO, "windowaio: attempt invalidate cache for %s\n",
+			f->file_name);
+	ihFile = CreateFile(f->file_name, 0, isharemode, NULL, OPEN_EXISTING,
+			FILE_FLAG_NO_BUFFERING, NULL);
+
+	if (ihFile != INVALID_HANDLE_VALUE) {
+		if (!CloseHandle(ihFile)) {
+			error = GetLastError();
+			log_info("windowsaio: invalidation fd close %s "
+				"failed: error %d\n", f->file_name, error);
+			rc = 1;
+		}
+	} else {
+		error = GetLastError();
+		if (error != ERROR_FILE_NOT_FOUND) {
+			log_info("windowsaio: cache invalidation of %s failed: "
+					"error %d\n", f->file_name, error);
+			rc = 1;
+		}
+	}
+
+	return rc;
+}
+
static int fio_windowsaio_open_file(struct thread_data *td, struct fio_file *f)
{
int rc = 0;
else
openmode = OPEN_EXISTING;
+ /* If we're going to use direct I/O, Windows will try and invalidate
+ * its cache at that point so there's no need to do it here */
+ if (td->o.invalidate_cache && !td->o.odirect)
+ windowsaio_invalidate_cache(f);
+
f->hFile = CreateFile(f->file_name, access, sharemode,
NULL, openmode, flags, NULL);
--- /dev/null
+# Example filecreate job
+#
+# create_on_open is needed so that the open happens during the run and not the
+# setup.
+#
+# openfiles needs to be set so that you do not exceed the maximum allowed open
+# files.
+#
+# filesize needs to be set to a non-zero value so fio will actually run, but the
+# IO will not really be done and the write latency numbers will only reflect the
+# open times.
+[global]
+create_on_open=1
+nrfiles=31250
+ioengine=filecreate
+fallocate=none
+filesize=4k
+openfiles=1
+
+[t0]
+[t1]
+[t2]
+[t3]
+[t4]
+[t5]
+[t6]
+[t7]
+[t8]
+[t9]
+[t10]
+[t11]
+[t12]
+[t13]
+[t14]
+[t15]
{
struct fio_file *f;
unsigned int i;
+ bool use_free = td_ioengine_flagged(td, FIO_NOFILEHASH);
dprint(FD_FILE, "close files\n");
td_io_unlink_file(td, f);
}
- sfree(f->file_name);
+ if (use_free)
+ free(f->file_name);
+ else
+ sfree(f->file_name);
f->file_name = NULL;
if (fio_file_axmap(f)) {
axmap_free(f->io_axmap);
f->io_axmap = NULL;
}
- sfree(f);
+ if (use_free)
+ free(f);
+ else
+ sfree(f);
}
td->o.filename = NULL;
{
struct fio_file *f;
- f = smalloc(sizeof(*f));
+ if (td_ioengine_flagged(td, FIO_NOFILEHASH))
+ f = calloc(1, sizeof(*f));
+ else
+ f = smalloc(sizeof(*f));
if (!f) {
assert(0);
return NULL;
if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
f->real_file_size = -1ULL;
- f->file_name = smalloc_strdup(file_name);
+ if (td_ioengine_flagged(td, FIO_NOFILEHASH))
+ f->file_name = strdup(file_name);
+ else
+ f->file_name = smalloc_strdup(file_name);
if (!f->file_name)
assert(0);
if (f->filetype == FIO_TYPE_FILE)
td->nr_normal_files++;
- set_already_allocated(file_name);
+ if (td->o.numjobs > 1)
+ set_already_allocated(file_name);
if (inc)
td->o.nr_files++;
__f = alloc_new_file(td);
if (f->file_name) {
- __f->file_name = smalloc_strdup(f->file_name);
+ if (td_ioengine_flagged(td, FIO_NOFILEHASH))
+ __f->file_name = strdup(f->file_name);
+ else
+ __f->file_name = smalloc_strdup(f->file_name);
if (!__f->file_name)
assert(0);
.TP
.BI \-\-max\-jobs \fR=\fPnr
Set the maximum number of threads/processes to support to \fInr\fR.
+NOTE: On Linux, it may be necessary to increase the shared-memory limit
+(`/proc/sys/kernel/shmmax') if fio runs into errors while creating jobs.
.TP
.BI \-\-server \fR=\fPargs
Start a backend server, with \fIargs\fR specifying what to listen to.
ioengine `foo.o' in `/tmp'. The path can be either
absolute or relative. See `engines/skeleton_external.c' in the fio source for
details of writing an external I/O engine.
+.TP
+.B filecreate
+Create empty files only. \fBfilesize\fR still needs to be specified so that fio
+will run and grab latency results, but no IO will actually be done on the files.
.SS "I/O engine specific parameters"
In addition, there are some parameters which are only valid when a specific
\fBioengine\fR is in use. These are used identically to normal parameters,
struct thread_data;
extern uint64_t ntime_since(const struct timespec *, const struct timespec *);
+extern uint64_t ntime_since_now(const struct timespec *);
extern uint64_t utime_since(const struct timespec *, const struct timespec *);
extern uint64_t utime_since_now(const struct timespec *);
extern uint64_t mtime_since(const struct timespec *, const struct timespec *);
return nsec + (sec * 1000000000LL);
}
+/* Nanoseconds elapsed between *s and the current time. */
+uint64_t ntime_since_now(const struct timespec *s)
+{
+	struct timespec now;
+
+	fio_gettime(&now, NULL);
+	return ntime_since(s, &now);
+}
+
uint64_t utime_since(const struct timespec *s, const struct timespec *e)
{
int64_t sec, usec;
if (td->parent)
td = td->parent;
- if (!td->o.stats)
+ if (!td->o.stats || td_ioengine_flagged(td, FIO_NOSTATS))
return;
if (no_reduce)
FIO_MEMALIGN = 1 << 9, /* engine wants aligned memory */
FIO_BIT_BASED = 1 << 10, /* engine uses a bit base (e.g. uses Kbit as opposed to KB) */
FIO_FAKEIO = 1 << 11, /* engine pretends to do IO */
+ FIO_NOSTATS = 1 << 12, /* don't do IO stats */
+ FIO_NOFILEHASH = 1 << 13, /* doesn't hash the files for lookup later. */
};
/*
.help = "DAX Device based IO engine",
},
#endif
+ {
+ .ival = "filecreate",
+ .help = "File creation engine",
+ },
{ .ival = "external",
.help = "Load external engine (append name)",
.cb = str_ioengine_external_cb,
unsigned int len;
int i;
const char *ddirname[] = {"read", "write", "trim"};
- struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object;
+ struct json_object *dir_object, *tmp_object, *percentile_object, *clat_bins_object = NULL;
char buf[120];
double p_of_agg = 100.0;
if (output_format & FIO_OUTPUT_JSON_PLUS) {
clat_bins_object = json_create_object();
- json_object_add_value_object(tmp_object, "bins", clat_bins_object);
+ if (ts->clat_percentiles)
+ json_object_add_value_object(tmp_object, "bins", clat_bins_object);
+
for(i = 0; i < FIO_IO_U_PLAT_NR; i++) {
if (ts->io_u_plat[ddir][i]) {
snprintf(buf, sizeof(buf), "%llu", plat_idx_to_val(i));
json_object_add_value_int(tmp_object, "max", max);
json_object_add_value_float(tmp_object, "mean", mean);
json_object_add_value_float(tmp_object, "stddev", dev);
+ if (output_format & FIO_OUTPUT_JSON_PLUS && ts->lat_percentiles)
+ json_object_add_value_object(tmp_object, "bins", clat_bins_object);
+
if (ovals)
free(ovals);
#define FIO_IO_U_LAT_U_NR 10
#define FIO_IO_U_LAT_M_NR 12
+/*
+ * Constants for clat percentiles
+ */
+#define FIO_IO_U_PLAT_BITS 6
+#define FIO_IO_U_PLAT_VAL (1 << FIO_IO_U_PLAT_BITS)
+#define FIO_IO_U_PLAT_GROUP_NR 29
+#define FIO_IO_U_PLAT_NR (FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
+#define FIO_IO_U_LIST_MAX_LEN 20 /* The size of the default and user-specified
+ list of percentiles */
+
/*
* Aggregate clat samples to report percentile(s) of them.
*
*
* FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the maximum
* range being tracked for latency samples. The maximum value tracked
- * accurately will be 2^(GROUP_NR + PLAT_BITS -1) microseconds.
+ * accurately will be 2^(GROUP_NR + PLAT_BITS - 1) nanoseconds.
*
* FIO_IO_U_PLAT_GROUP_NR and FIO_IO_U_PLAT_BITS determine the memory
* requirement of storing those aggregate counts. The memory used will
* 3 8 2 [256,511] 64
* 4 9 3 [512,1023] 64
* ... ... ... [...,...] ...
- * 18 23 17 [8838608,+inf]** 64
+ * 28 33 27 [8589934592,+inf]** 64
*
* * Special cases: when n < (M-1) or when n == (M-1), in both cases,
* the value cannot be rounded off. Use all bits of the sample as
* index.
*
- * ** If a sample's MSB is greater than 23, it will be counted as 23.
+ * ** If a sample's MSB is greater than 33, it will be counted as 33.
*/
-#define FIO_IO_U_PLAT_BITS 6
-#define FIO_IO_U_PLAT_VAL (1 << FIO_IO_U_PLAT_BITS)
-#define FIO_IO_U_PLAT_GROUP_NR 29
-#define FIO_IO_U_PLAT_NR (FIO_IO_U_PLAT_GROUP_NR * FIO_IO_U_PLAT_VAL)
-#define FIO_IO_U_LIST_MAX_LEN 20 /* The size of the default and user-specified
- list of percentiles */
-
/*
* Trim cycle count measurements
*/
prev_ddir = None
for ddir in ddir_set:
+ if 'bins' in jsondata['jobs'][jobnum][ddir]['clat_ns']:
+ bins_loc = 'clat_ns'
+ elif 'bins' in jsondata['jobs'][jobnum][ddir]['lat_ns']:
+ bins_loc = 'lat_ns'
+ else:
+ raise RuntimeError("Latency bins not found. "
+ "Are you sure you are using json+ output?")
+
bins[ddir] = [[int(key), value] for key, value in
- jsondata['jobs'][jobnum][ddir]['clat_ns']
+ jsondata['jobs'][jobnum][ddir][bins_loc]
['bins'].iteritems()]
bins[ddir] = sorted(bins[ddir], key=lambda bin: bin[0])
outfile = stub + '_job' + str(jobnum) + ext
with open(outfile, 'w') as output:
- output.write("clat_nsec, ")
+ output.write("{0}ec, ".format(bins_loc))
ddir_list = list(ddir_set)
for ddir in ddir_list:
output.write("{0}_count, {0}_cumulative, {0}_percentile, ".