test_script:
- python.exe t/run-fio-tests.py --artifact-root test-artifacts --debug
+deploy:
+ - provider: GitHub
+ description: fio Windows installer
+ auth_token: # encrypted token from GitHub
+ secure: Tjj+xRQEV25P6dQgboUblTCKx/LtUOUav2bvzSCtwMhHMAxrrn2adod6nlTf0ItV
+ artifact: fio.msi # upload installer to release assets
+ draft: false
+ prerelease: false
+ on:
+ APPVEYOR_REPO_TAG: true # deploy on tag push only
+ DISTRO: cygwin
+
on_finish:
- 'bash.exe -lc "cd \"${APPVEYOR_BUILD_FOLDER}\" && [ -d test-artifacts ] && 7z a -t7z test-artifacts.7z test-artifacts -xr!foo.0.0 -xr!latency.?.0 -xr!fio_jsonplus_clat2csv.test && appveyor PushArtifact test-artifacts.7z"'
#!/bin/sh
GVF=FIO-VERSION-FILE
-DEF_VER=fio-3.30
+DEF_VER=fio-3.31
LF='
'
effectively caps the file size at `real_size - offset`. Can be combined with
:option:`size` to constrain the start and end range of the I/O workload.
A percentage can be specified by a number between 1 and 100 followed by '%',
- for example, ``offset=20%`` to specify 20%. In ZBD mode, value can be set as
+ for example, ``offset=20%`` to specify 20%. In ZBD mode, value can be set as
number of zones using 'z'.
.. option:: offset_align=int
If this option is not specified, fio will use the full size of the given
files or devices. If the files do not exist, size must be given. It is also
possible to give size as a percentage between 1 and 100. If ``size=20%`` is
- given, fio will use 20% of the full size of the given files or devices.
+ given, fio will use 20% of the full size of the given files or devices.
In ZBD mode, value can also be set as number of zones using 'z'.
Can be combined with :option:`offset` to constrain the start and end range
that I/O will be done within.
The S3 key/access id.
+.. option:: http_s3_sse_customer_key=str : [http]
+
+ The encryption customer key in SSE server side.
+
+.. option:: http_s3_sse_customer_algorithm=str : [http]
+
+ The encryption customer algorithm in SSE server side.
+ Default is **AES256**
+
+.. option:: http_s3_storage_class=str : [http]
+
+ Which storage class to access. User-customizable settings.
+ Default is **STANDARD**
+
.. option:: http_swift_auth_token=str : [http]
The Swift auth token. See the example configuration file on how
Select the xnvme async command interface. This can take these values.
**emu**
- This is default and used to emulate asynchronous I/O.
+ This is default and used to emulate asynchronous I/O by using a
+ single thread to create a queue pair on top of a synchronous
+ I/O interface using the NVMe driver IOCTL.
**thrpool**
- Use thread pool for Asynchronous I/O.
+ Emulate an asynchronous I/O interface with a pool of userspace
+ threads on top of a synchronous I/O interface using the NVMe
+ driver IOCTL. By default four threads are used.
**io_uring**
- Use Linux io_uring/liburing for Asynchronous I/O.
+ Linux native asynchronous I/O interface which supports both
+ direct and buffered I/O.
+ **io_uring_cmd**
+ Fast Linux native asynchronous I/O interface for NVMe pass
+ through commands. This only works with NVMe character device
+ (/dev/ngXnY).
**libaio**
Use Linux aio for Asynchronous I/O.
**posix**
- Use POSIX aio for Asynchronous I/O.
+ Use the posix asynchronous I/O interface to perform one or
+ more I/O operations asynchronously.
**nil**
- Use nil-io; For introspective perf. evaluation
+ Do not transfer any data; just pretend to. This is mainly used
+ for introspective performance evaluation.
.. option:: xnvme_sync=str : [xnvme]
Select the xnvme synchronous command interface. This can take these values.
**nvme**
- This is default and uses Linux NVMe Driver ioctl() for synchronous I/O.
+ This is default and uses Linux NVMe Driver ioctl() for
+ synchronous I/O.
**psync**
- Use pread()/write() for synchronous I/O.
+ This supports regular as well as vectored pread() and pwrite()
+ commands.
+ **block**
+ This is the same as psync except that it also supports zone
+ management commands using Linux block layer IOCTLs.
.. option:: xnvme_admin=str : [xnvme]
Select the xnvme admin command interface. This can take these values.
**nvme**
- This is default and uses linux NVMe Driver ioctl() for admin commands.
+ This is default and uses Linux NVMe Driver ioctl() for admin
+ commands.
**block**
Use Linux Block Layer ioctl() and sysfs for admin commands.
- **file_as_ns**
- Use file-stat to construct NVMe idfy responses.
.. option:: xnvme_dev_nsid=int : [xnvme]
- xnvme namespace identifier, for userspace NVMe driver.
+ xnvme namespace identifier for userspace NVMe driver, such as SPDK.
.. option:: xnvme_iovec=int : [xnvme]
appended, the total error count and the first error. The error field given
in the stats is the first error that was hit during the run.
+ Note: a write error from the device may go unnoticed by fio when using
+ buffered IO, as the write() (or similar) system call merely dirties the
+ kernel pages, unless :option:`sync` or :option:`direct` is used. Device IO
+ errors occur when the dirty data is actually written out to disk. If fully
+ sync writes aren't desirable, :option:`fsync` or :option:`fdatasync` can be
+ used as well. This is specific to writes, as reads are always synchronous.
+
The allowed values are:
**none**
pshared.c options.c \
smalloc.c filehash.c profile.c debug.c engines/cpu.c \
engines/mmap.c engines/sync.c engines/null.c engines/net.c \
- engines/ftruncate.c engines/filecreate.c engines/filestat.c engines/filedelete.c \
+ engines/ftruncate.c engines/fileoperations.c \
engines/exec.c \
server.c client.c iolog.c backend.c libfio.c flow.c cconv.c \
gettime-thread.c helpers.c json.c idletime.c td_error.c \
``pkgutil -i fio``.
Windows:
- Rebecca Cran <rebecca@bsdio.com> has fio packages for Windows at
- https://bsdio.com/fio/ . The latest builds for Windows can also
- be grabbed from https://ci.appveyor.com/project/axboe/fio by clicking
- the latest x86 or x64 build, then selecting the ARTIFACTS tab.
+ Beginning with fio 3.31 Windows installers are available on GitHub at
+ https://github.com/axboe/fio/releases. Rebecca Cran
+ <rebecca@bsdio.com> has fio packages for Windows at
+ https://bsdio.com/fio/ . The latest builds for Windows can also be
+ grabbed from https://ci.appveyor.com/project/axboe/fio by clicking the
+ latest x86 or x64 build and then selecting the Artifacts tab.
BSDs:
Packages for BSDs may be available from their binary package repositories.
for_each_td(td, i) {
print_status_init(td->thread_number - 1);
- if (!td->o.create_serialize) {
- /*
- * When operating on a single rile in parallel,
- * perform single-threaded early setup so that
- * when setup_files() does not run into issues
- * later.
- */
- if (!i && td->o.nr_files == 1) {
- if (setup_shared_file(td)) {
- exit_value++;
- if (td->error)
- log_err("fio: pid=%d, err=%d/%s\n",
- (int) td->pid, td->error, td->verror);
- td_set_runstate(td, TD_REAPED);
- todo--;
- }
- }
+ if (!td->o.create_serialize)
continue;
- }
if (fio_verify_load_state(td))
goto reap;
type "$1" >/dev/null 2>&1
}
+num() {
+ echo "$1" | grep -P -q "^[0-9]+$"
+}
+
check_define() {
cat > $TMPC <<EOF
#if !defined($1)
xnvme=""
libzbc=""
dfs=""
+seed_buckets=""
dynamic_engines="no"
prefix=/usr/local
;;
--enable-asan) asan="yes"
;;
+ --seed-buckets=*) seed_buckets="$optarg"
+ ;;
--help)
show_help="yes"
;;
echo "--dynamic-libengines Lib-based ioengines as dynamic libraries"
echo "--disable-dfs Disable DAOS File System support even if found"
echo "--enable-asan Enable address sanitizer"
+ echo "--seed-buckets= Number of seed buckets for the refill-buffer"
exit $exit_val
fi
fi
fi
print_config "TCMalloc support" "$tcmalloc"
+if ! num "$seed_buckets"; then
+ seed_buckets=4
+elif test "$seed_buckets" -lt 2; then
+ seed_buckets=2
+elif test "$seed_buckets" -gt 16; then
+ seed_buckets=16
+fi
+echo "#define CONFIG_SEED_BUCKETS $seed_buckets" >> $config_host_h
+print_config "seed_buckets" "$seed_buckets"
echo "LIBS+=$LIBS" >> $config_host_mak
echo "GFIO_LIBS+=$GFIO_LIBS" >> $config_host_mak
+++ /dev/null
-/*
- * filecreate engine
- *
- * IO engine that doesn't do any IO, just creates files and tracks the latency
- * of the file creation.
- */
-#include <stdio.h>
-#include <fcntl.h>
-#include <errno.h>
-
-#include "../fio.h"
-
-struct fc_data {
- enum fio_ddir stat_ddir;
-};
-
-static int open_file(struct thread_data *td, struct fio_file *f)
-{
- struct timespec start;
- int do_lat = !td->o.disable_lat;
-
- dprint(FD_FILE, "fd open %s\n", f->file_name);
-
- if (f->filetype != FIO_TYPE_FILE) {
- log_err("fio: only files are supported\n");
- return 1;
- }
- if (!strcmp(f->file_name, "-")) {
- log_err("fio: can't read/write to stdin/out\n");
- return 1;
- }
-
- if (do_lat)
- fio_gettime(&start, NULL);
-
- f->fd = open(f->file_name, O_CREAT|O_RDWR, 0600);
-
- if (f->fd == -1) {
- char buf[FIO_VERROR_SIZE];
- int e = errno;
-
- snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
- td_verror(td, e, buf);
- return 1;
- }
-
- if (do_lat) {
- struct fc_data *data = td->io_ops_data;
- uint64_t nsec;
-
- nsec = ntime_since_now(&start);
- add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
- }
-
- return 0;
-}
-
-static enum fio_q_status queue_io(struct thread_data *td,
- struct io_u fio_unused *io_u)
-{
- return FIO_Q_COMPLETED;
-}
-
-/*
- * Ensure that we at least have a block size worth of IO to do for each
- * file. If the job file has td->o.size < nr_files * block_size, then
- * fio won't do anything.
- */
-static int get_file_size(struct thread_data *td, struct fio_file *f)
-{
- f->real_file_size = td_min_bs(td);
- return 0;
-}
-
-static int init(struct thread_data *td)
-{
- struct fc_data *data;
-
- data = calloc(1, sizeof(*data));
-
- if (td_read(td))
- data->stat_ddir = DDIR_READ;
- else if (td_write(td))
- data->stat_ddir = DDIR_WRITE;
-
- td->io_ops_data = data;
- return 0;
-}
-
-static void cleanup(struct thread_data *td)
-{
- struct fc_data *data = td->io_ops_data;
-
- free(data);
-}
-
-static struct ioengine_ops ioengine = {
- .name = "filecreate",
- .version = FIO_IOOPS_VERSION,
- .init = init,
- .cleanup = cleanup,
- .queue = queue_io,
- .get_file_size = get_file_size,
- .open_file = open_file,
- .close_file = generic_close_file,
- .flags = FIO_DISKLESSIO | FIO_SYNCIO | FIO_FAKEIO |
- FIO_NOSTATS | FIO_NOFILEHASH,
-};
-
-static void fio_init fio_filecreate_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_filecreate_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
+++ /dev/null
-/*
- * file delete engine
- *
- * IO engine that doesn't do any IO, just delete files and track the latency
- * of the file deletion.
- */
-#include <stdio.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include "../fio.h"
-
-struct fc_data {
- enum fio_ddir stat_ddir;
-};
-
-static int delete_file(struct thread_data *td, struct fio_file *f)
-{
- struct timespec start;
- int do_lat = !td->o.disable_lat;
- int ret;
-
- dprint(FD_FILE, "fd delete %s\n", f->file_name);
-
- if (f->filetype != FIO_TYPE_FILE) {
- log_err("fio: only files are supported\n");
- return 1;
- }
- if (!strcmp(f->file_name, "-")) {
- log_err("fio: can't read/write to stdin/out\n");
- return 1;
- }
-
- if (do_lat)
- fio_gettime(&start, NULL);
-
- ret = unlink(f->file_name);
-
- if (ret == -1) {
- char buf[FIO_VERROR_SIZE];
- int e = errno;
-
- snprintf(buf, sizeof(buf), "delete(%s)", f->file_name);
- td_verror(td, e, buf);
- return 1;
- }
-
- if (do_lat) {
- struct fc_data *data = td->io_ops_data;
- uint64_t nsec;
-
- nsec = ntime_since_now(&start);
- add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
- }
-
- return 0;
-}
-
-
-static enum fio_q_status queue_io(struct thread_data *td, struct io_u fio_unused *io_u)
-{
- return FIO_Q_COMPLETED;
-}
-
-static int init(struct thread_data *td)
-{
- struct fc_data *data;
-
- data = calloc(1, sizeof(*data));
-
- if (td_read(td))
- data->stat_ddir = DDIR_READ;
- else if (td_write(td))
- data->stat_ddir = DDIR_WRITE;
-
- td->io_ops_data = data;
- return 0;
-}
-
-static int delete_invalidate(struct thread_data *td, struct fio_file *f)
-{
- /* do nothing because file not opened */
- return 0;
-}
-
-static void cleanup(struct thread_data *td)
-{
- struct fc_data *data = td->io_ops_data;
-
- free(data);
-}
-
-static struct ioengine_ops ioengine = {
- .name = "filedelete",
- .version = FIO_IOOPS_VERSION,
- .init = init,
- .invalidate = delete_invalidate,
- .cleanup = cleanup,
- .queue = queue_io,
- .get_file_size = generic_get_file_size,
- .open_file = delete_file,
- .flags = FIO_SYNCIO | FIO_FAKEIO |
- FIO_NOSTATS | FIO_NOFILEHASH,
-};
-
-static void fio_init fio_filedelete_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_filedelete_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
--- /dev/null
+/*
+ * fileoperations engine
+ *
+ * IO engine that doesn't do any IO, just operates on files and tracks the latency
+ * of the file operation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include "../fio.h"
+#include "../optgroup.h"
+#include "../oslib/statx.h"
+
+
+struct fc_data {
+ enum fio_ddir stat_ddir;
+};
+
+struct filestat_options {
+ void *pad;
+ unsigned int stat_type;
+};
+
+enum {
+ FIO_FILESTAT_STAT = 1,
+ FIO_FILESTAT_LSTAT = 2,
+ FIO_FILESTAT_STATX = 3,
+};
+
+static struct fio_option options[] = {
+ {
+ .name = "stat_type",
+ .lname = "stat_type",
+ .type = FIO_OPT_STR,
+ .off1 = offsetof(struct filestat_options, stat_type),
+ .help = "Specify stat system call type to measure lookup/getattr performance",
+ .def = "stat",
+ .posval = {
+ { .ival = "stat",
+ .oval = FIO_FILESTAT_STAT,
+ .help = "Use stat(2)",
+ },
+ { .ival = "lstat",
+ .oval = FIO_FILESTAT_LSTAT,
+ .help = "Use lstat(2)",
+ },
+ { .ival = "statx",
+ .oval = FIO_FILESTAT_STATX,
+ .help = "Use statx(2) if exists",
+ },
+ },
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_FILESTAT,
+ },
+ {
+ .name = NULL,
+ },
+};
+
+
+static int open_file(struct thread_data *td, struct fio_file *f)
+{
+ struct timespec start;
+ int do_lat = !td->o.disable_lat;
+
+ dprint(FD_FILE, "fd open %s\n", f->file_name);
+
+ if (f->filetype != FIO_TYPE_FILE) {
+ log_err("fio: only files are supported\n");
+ return 1;
+ }
+ if (!strcmp(f->file_name, "-")) {
+ log_err("fio: can't read/write to stdin/out\n");
+ return 1;
+ }
+
+ if (do_lat)
+ fio_gettime(&start, NULL);
+
+ f->fd = open(f->file_name, O_CREAT|O_RDWR, 0600);
+
+ if (f->fd == -1) {
+ char buf[FIO_VERROR_SIZE];
+ int e = errno;
+
+ snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
+ td_verror(td, e, buf);
+ return 1;
+ }
+
+ if (do_lat) {
+ struct fc_data *data = td->io_ops_data;
+ uint64_t nsec;
+
+ nsec = ntime_since_now(&start);
+ add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
+ }
+
+ return 0;
+}
+
+static int stat_file(struct thread_data *td, struct fio_file *f)
+{
+ struct filestat_options *o = td->eo;
+ struct timespec start;
+ int do_lat = !td->o.disable_lat;
+ struct stat statbuf;
+#ifndef WIN32
+ struct statx statxbuf;
+ char *abspath;
+#endif
+ int ret;
+
+ dprint(FD_FILE, "fd stat %s\n", f->file_name);
+
+ if (f->filetype != FIO_TYPE_FILE) {
+ log_err("fio: only files are supported\n");
+ return 1;
+ }
+ if (!strcmp(f->file_name, "-")) {
+ log_err("fio: can't read/write to stdin/out\n");
+ return 1;
+ }
+
+ if (do_lat)
+ fio_gettime(&start, NULL);
+
+ switch (o->stat_type) {
+ case FIO_FILESTAT_STAT:
+ ret = stat(f->file_name, &statbuf);
+ break;
+ case FIO_FILESTAT_LSTAT:
+ ret = lstat(f->file_name, &statbuf);
+ break;
+ case FIO_FILESTAT_STATX:
+#ifndef WIN32
+ abspath = realpath(f->file_name, NULL);
+ if (abspath) {
+ ret = statx(-1, abspath, 0, STATX_ALL, &statxbuf);
+ free(abspath);
+ } else
+ ret = -1;
+#else
+ ret = -1;
+#endif
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+
+ if (ret == -1) {
+ char buf[FIO_VERROR_SIZE];
+ int e = errno;
+
+ snprintf(buf, sizeof(buf), "stat(%s) type=%u", f->file_name,
+ o->stat_type);
+ td_verror(td, e, buf);
+ return 1;
+ }
+
+ if (do_lat) {
+ struct fc_data *data = td->io_ops_data;
+ uint64_t nsec;
+
+ nsec = ntime_since_now(&start);
+ add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
+ }
+
+ return 0;
+}
+
+
+static int delete_file(struct thread_data *td, struct fio_file *f)
+{
+ struct timespec start;
+ int do_lat = !td->o.disable_lat;
+ int ret;
+
+ dprint(FD_FILE, "fd delete %s\n", f->file_name);
+
+ if (f->filetype != FIO_TYPE_FILE) {
+ log_err("fio: only files are supported\n");
+ return 1;
+ }
+ if (!strcmp(f->file_name, "-")) {
+ log_err("fio: can't read/write to stdin/out\n");
+ return 1;
+ }
+
+ if (do_lat)
+ fio_gettime(&start, NULL);
+
+ ret = unlink(f->file_name);
+
+ if (ret == -1) {
+ char buf[FIO_VERROR_SIZE];
+ int e = errno;
+
+ snprintf(buf, sizeof(buf), "delete(%s)", f->file_name);
+ td_verror(td, e, buf);
+ return 1;
+ }
+
+ if (do_lat) {
+ struct fc_data *data = td->io_ops_data;
+ uint64_t nsec;
+
+ nsec = ntime_since_now(&start);
+ add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
+ }
+
+ return 0;
+}
+
+static int invalidate_do_nothing(struct thread_data *td, struct fio_file *f)
+{
+ /* do nothing because file not opened */
+ return 0;
+}
+
+static enum fio_q_status queue_io(struct thread_data *td, struct io_u *io_u)
+{
+ return FIO_Q_COMPLETED;
+}
+
+/*
+ * Ensure that we at least have a block size worth of IO to do for each
+ * file. If the job file has td->o.size < nr_files * block_size, then
+ * fio won't do anything.
+ */
+static int get_file_size(struct thread_data *td, struct fio_file *f)
+{
+ f->real_file_size = td_min_bs(td);
+ return 0;
+}
+
+static int init(struct thread_data *td)
+{
+ struct fc_data *data;
+
+ data = calloc(1, sizeof(*data));
+
+ if (td_read(td))
+ data->stat_ddir = DDIR_READ;
+ else if (td_write(td))
+ data->stat_ddir = DDIR_WRITE;
+
+ td->io_ops_data = data;
+ return 0;
+}
+
+static void cleanup(struct thread_data *td)
+{
+ struct fc_data *data = td->io_ops_data;
+
+ free(data);
+}
+
+static struct ioengine_ops ioengine_filecreate = {
+ .name = "filecreate",
+ .version = FIO_IOOPS_VERSION,
+ .init = init,
+ .cleanup = cleanup,
+ .queue = queue_io,
+ .get_file_size = get_file_size,
+ .open_file = open_file,
+ .close_file = generic_close_file,
+ .flags = FIO_DISKLESSIO | FIO_SYNCIO | FIO_FAKEIO |
+ FIO_NOSTATS | FIO_NOFILEHASH,
+};
+
+static struct ioengine_ops ioengine_filestat = {
+ .name = "filestat",
+ .version = FIO_IOOPS_VERSION,
+ .init = init,
+ .cleanup = cleanup,
+ .queue = queue_io,
+ .invalidate = invalidate_do_nothing,
+ .get_file_size = generic_get_file_size,
+ .open_file = stat_file,
+ .flags = FIO_SYNCIO | FIO_FAKEIO |
+ FIO_NOSTATS | FIO_NOFILEHASH,
+ .options = options,
+ .option_struct_size = sizeof(struct filestat_options),
+};
+
+static struct ioengine_ops ioengine_filedelete = {
+ .name = "filedelete",
+ .version = FIO_IOOPS_VERSION,
+ .init = init,
+ .invalidate = invalidate_do_nothing,
+ .cleanup = cleanup,
+ .queue = queue_io,
+ .get_file_size = generic_get_file_size,
+ .open_file = delete_file,
+ .flags = FIO_SYNCIO | FIO_FAKEIO |
+ FIO_NOSTATS | FIO_NOFILEHASH,
+};
+
+
+static void fio_init fio_fileoperations_register(void)
+{
+ register_ioengine(&ioengine_filecreate);
+ register_ioengine(&ioengine_filestat);
+ register_ioengine(&ioengine_filedelete);
+}
+
+static void fio_exit fio_fileoperations_unregister(void)
+{
+ unregister_ioengine(&ioengine_filecreate);
+ unregister_ioengine(&ioengine_filestat);
+ unregister_ioengine(&ioengine_filedelete);
+}
+++ /dev/null
-/*
- * filestat engine
- *
- * IO engine that doesn't do any IO, just stat files and tracks the latency
- * of the file stat.
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include "../fio.h"
-#include "../optgroup.h"
-#include "../oslib/statx.h"
-
-struct fc_data {
- enum fio_ddir stat_ddir;
-};
-
-struct filestat_options {
- void *pad;
- unsigned int stat_type;
-};
-
-enum {
- FIO_FILESTAT_STAT = 1,
- FIO_FILESTAT_LSTAT = 2,
- FIO_FILESTAT_STATX = 3,
-};
-
-static struct fio_option options[] = {
- {
- .name = "stat_type",
- .lname = "stat_type",
- .type = FIO_OPT_STR,
- .off1 = offsetof(struct filestat_options, stat_type),
- .help = "Specify stat system call type to measure lookup/getattr performance",
- .def = "stat",
- .posval = {
- { .ival = "stat",
- .oval = FIO_FILESTAT_STAT,
- .help = "Use stat(2)",
- },
- { .ival = "lstat",
- .oval = FIO_FILESTAT_LSTAT,
- .help = "Use lstat(2)",
- },
- { .ival = "statx",
- .oval = FIO_FILESTAT_STATX,
- .help = "Use statx(2) if exists",
- },
- },
- .category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_FILESTAT,
- },
- {
- .name = NULL,
- },
-};
-
-static int stat_file(struct thread_data *td, struct fio_file *f)
-{
- struct filestat_options *o = td->eo;
- struct timespec start;
- int do_lat = !td->o.disable_lat;
- struct stat statbuf;
-#ifndef WIN32
- struct statx statxbuf;
- char *abspath;
-#endif
- int ret;
-
- dprint(FD_FILE, "fd stat %s\n", f->file_name);
-
- if (f->filetype != FIO_TYPE_FILE) {
- log_err("fio: only files are supported\n");
- return 1;
- }
- if (!strcmp(f->file_name, "-")) {
- log_err("fio: can't read/write to stdin/out\n");
- return 1;
- }
-
- if (do_lat)
- fio_gettime(&start, NULL);
-
- switch (o->stat_type){
- case FIO_FILESTAT_STAT:
- ret = stat(f->file_name, &statbuf);
- break;
- case FIO_FILESTAT_LSTAT:
- ret = lstat(f->file_name, &statbuf);
- break;
- case FIO_FILESTAT_STATX:
-#ifndef WIN32
- abspath = realpath(f->file_name, NULL);
- if (abspath) {
- ret = statx(-1, abspath, 0, STATX_ALL, &statxbuf);
- free(abspath);
- } else
- ret = -1;
-#else
- ret = -1;
-#endif
- break;
- default:
- ret = -1;
- break;
- }
-
- if (ret == -1) {
- char buf[FIO_VERROR_SIZE];
- int e = errno;
-
- snprintf(buf, sizeof(buf), "stat(%s) type=%u", f->file_name,
- o->stat_type);
- td_verror(td, e, buf);
- return 1;
- }
-
- if (do_lat) {
- struct fc_data *data = td->io_ops_data;
- uint64_t nsec;
-
- nsec = ntime_since_now(&start);
- add_clat_sample(td, data->stat_ddir, nsec, 0, 0, 0, 0);
- }
-
- return 0;
-}
-
-static enum fio_q_status queue_io(struct thread_data *td, struct io_u fio_unused *io_u)
-{
- return FIO_Q_COMPLETED;
-}
-
-static int init(struct thread_data *td)
-{
- struct fc_data *data;
-
- data = calloc(1, sizeof(*data));
-
- if (td_read(td))
- data->stat_ddir = DDIR_READ;
- else if (td_write(td))
- data->stat_ddir = DDIR_WRITE;
-
- td->io_ops_data = data;
- return 0;
-}
-
-static void cleanup(struct thread_data *td)
-{
- struct fc_data *data = td->io_ops_data;
-
- free(data);
-}
-
-static int stat_invalidate(struct thread_data *td, struct fio_file *f)
-{
- /* do nothing because file not opened */
- return 0;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "filestat",
- .version = FIO_IOOPS_VERSION,
- .init = init,
- .cleanup = cleanup,
- .queue = queue_io,
- .invalidate = stat_invalidate,
- .get_file_size = generic_get_file_size,
- .open_file = stat_file,
- .flags = FIO_SYNCIO | FIO_FAKEIO |
- FIO_NOSTATS | FIO_NOFILEHASH,
- .options = options,
- .option_struct_size = sizeof(struct filestat_options),
-};
-
-static void fio_init fio_filestat_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_filestat_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
char *s3_key;
char *s3_keyid;
char *s3_region;
+ char *s3_sse_customer_key;
+ char *s3_sse_customer_algorithm;
+ char *s3_storage_class;
char *swift_auth_token;
int verbose;
unsigned int mode;
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_HTTP,
},
+ {
+ .name = "http_s3_sse_customer_key",
+ .lname = "SSE Customer Key",
+ .type = FIO_OPT_STR_STORE,
+ .help = "S3 SSE Customer Key",
+ .off1 = offsetof(struct http_options, s3_sse_customer_key),
+ .def = "",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_HTTP,
+ },
+ {
+ .name = "http_s3_sse_customer_algorithm",
+ .lname = "SSE Customer Algorithm",
+ .type = FIO_OPT_STR_STORE,
+ .help = "S3 SSE Customer Algorithm",
+ .off1 = offsetof(struct http_options, s3_sse_customer_algorithm),
+ .def = "AES256",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_HTTP,
+ },
+ {
+ .name = "http_s3_storage_class",
+ .lname = "S3 Storage class",
+ .type = FIO_OPT_STR_STORE,
+ .help = "S3 Storage Class",
+ .off1 = offsetof(struct http_options, s3_storage_class),
+ .def = "STANDARD",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_HTTP,
+ },
{
.name = "http_mode",
.lname = "Request mode to use",
return _conv_hex(hash, MD5_DIGEST_LENGTH);
}
+static char *_conv_base64_encode(const unsigned char *p, size_t len)
+{
+ char *r, *ret;
+ int i;
+ static const char sEncodingTable[] = {
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
+ 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
+ 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
+ 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
+ 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
+ 'w', 'x', 'y', 'z', '0', '1', '2', '3',
+ '4', '5', '6', '7', '8', '9', '+', '/'
+ };
+
+ size_t out_len = 4 * ((len + 2) / 3);
+ ret = r = malloc(out_len + 1);
+
+ for (i = 0; i < len - 2; i += 3) {
+ *r++ = sEncodingTable[(p[i] >> 2) & 0x3F];
+ *r++ = sEncodingTable[((p[i] & 0x3) << 4) | ((int) (p[i + 1] & 0xF0) >> 4)];
+ *r++ = sEncodingTable[((p[i + 1] & 0xF) << 2) | ((int) (p[i + 2] & 0xC0) >> 6)];
+ *r++ = sEncodingTable[p[i + 2] & 0x3F];
+ }
+
+ if (i < len) {
+ *r++ = sEncodingTable[(p[i] >> 2) & 0x3F];
+ if (i == (len - 1)) {
+ *r++ = sEncodingTable[((p[i] & 0x3) << 4)];
+ *r++ = '=';
+ } else {
+ *r++ = sEncodingTable[((p[i] & 0x3) << 4) | ((int) (p[i + 1] & 0xF0) >> 4)];
+ *r++ = sEncodingTable[((p[i + 1] & 0xF) << 2)];
+ }
+ *r++ = '=';
+ }
+
+ ret[out_len]=0;
+ return ret;
+}
+
+static char *_gen_base64_md5(const unsigned char *p, size_t len)
+{
+ unsigned char hash[MD5_DIGEST_LENGTH];
+ MD5((unsigned char*)p, len, hash);
+ return _conv_base64_encode(hash, MD5_DIGEST_LENGTH);
+}
+
static void _hmac(unsigned char *md, void *key, int key_len, char *data) {
#ifndef CONFIG_HAVE_OPAQUE_HMAC_CTX
HMAC_CTX _ctx;
char date_iso[32];
char method[8];
char dkey[128];
- char creq[512];
- char sts[256];
+ char creq[4096];
+ char sts[512];
char s[512];
char *uri_encoded = NULL;
char *dsha = NULL;
const char *service = "s3";
const char *aws = "aws4_request";
unsigned char md[SHA256_DIGEST_LENGTH];
+ unsigned char sse_key[33] = {0};
+ char *sse_key_base64 = NULL;
+ char *sse_key_md5_base64 = NULL;
time_t t = time(NULL);
struct tm *gtm = gmtime(&t);
strftime (date_iso, sizeof(date_iso), "%Y%m%dT%H%M%SZ", gtm);
uri_encoded = _aws_uriencode(uri);
+ if (o->s3_sse_customer_key != NULL)
+ strncpy((char*)sse_key, o->s3_sse_customer_key, sizeof(sse_key) - 1);
+
if (op == DDIR_WRITE) {
dsha = _gen_hex_sha256(buf, len);
sprintf(method, "PUT");
}
/* Create the canonical request first */
- snprintf(creq, sizeof(creq),
- "%s\n"
- "%s\n"
- "\n"
- "host:%s\n"
- "x-amz-content-sha256:%s\n"
- "x-amz-date:%s\n"
- "\n"
- "host;x-amz-content-sha256;x-amz-date\n"
- "%s"
- , method
- , uri_encoded, o->host, dsha, date_iso, dsha);
+ if (sse_key[0] != '\0') {
+ sse_key_base64 = _conv_base64_encode(sse_key, sizeof(sse_key) - 1);
+ sse_key_md5_base64 = _gen_base64_md5(sse_key, sizeof(sse_key) - 1);
+ snprintf(creq, sizeof(creq),
+ "%s\n"
+ "%s\n"
+ "\n"
+ "host:%s\n"
+ "x-amz-content-sha256:%s\n"
+ "x-amz-date:%s\n"
+ "x-amz-server-side-encryption-customer-algorithm:%s\n"
+ "x-amz-server-side-encryption-customer-key:%s\n"
+ "x-amz-server-side-encryption-customer-key-md5:%s\n"
+ "x-amz-storage-class:%s\n"
+ "\n"
+ "host;x-amz-content-sha256;x-amz-date;"
+ "x-amz-server-side-encryption-customer-algorithm;"
+ "x-amz-server-side-encryption-customer-key;"
+ "x-amz-server-side-encryption-customer-key-md5;"
+ "x-amz-storage-class\n"
+ "%s"
+ , method
+ , uri_encoded, o->host, dsha, date_iso
+ , o->s3_sse_customer_algorithm, sse_key_base64
+ , sse_key_md5_base64, o->s3_storage_class, dsha);
+ } else {
+ snprintf(creq, sizeof(creq),
+ "%s\n"
+ "%s\n"
+ "\n"
+ "host:%s\n"
+ "x-amz-content-sha256:%s\n"
+ "x-amz-date:%s\n"
+ "x-amz-storage-class:%s\n"
+ "\n"
+ "host;x-amz-content-sha256;x-amz-date;x-amz-storage-class\n"
+ "%s"
+ , method
+ , uri_encoded, o->host, dsha, date_iso, o->s3_storage_class, dsha);
+ }
csha = _gen_hex_sha256(creq, strlen(creq));
snprintf(sts, sizeof(sts), "AWS4-HMAC-SHA256\n%s\n%s/%s/%s/%s\n%s",
- date_iso, date_short, o->s3_region, service, aws, csha);
+ date_iso, date_short, o->s3_region, service, aws, csha);
snprintf((char *)dkey, sizeof(dkey), "AWS4%s", o->s3_key);
_hmac(md, dkey, strlen(dkey), date_short);
snprintf(s, sizeof(s), "x-amz-date: %s", date_iso);
slist = curl_slist_append(slist, s);
- snprintf(s, sizeof(s), "Authorization: AWS4-HMAC-SHA256 Credential=%s/%s/%s/s3/aws4_request,"
- "SignedHeaders=host;x-amz-content-sha256;x-amz-date,Signature=%s",
- o->s3_keyid, date_short, o->s3_region, signature);
+ if (sse_key[0] != '\0') {
+ snprintf(s, sizeof(s), "x-amz-server-side-encryption-customer-algorithm: %s", o->s3_sse_customer_algorithm);
+ slist = curl_slist_append(slist, s);
+ snprintf(s, sizeof(s), "x-amz-server-side-encryption-customer-key: %s", sse_key_base64);
+ slist = curl_slist_append(slist, s);
+ snprintf(s, sizeof(s), "x-amz-server-side-encryption-customer-key-md5: %s", sse_key_md5_base64);
+ slist = curl_slist_append(slist, s);
+ }
+
+ snprintf(s, sizeof(s), "x-amz-storage-class: %s", o->s3_storage_class);
+ slist = curl_slist_append(slist, s);
+
+ if (sse_key[0] != '\0') {
+ snprintf(s, sizeof(s), "Authorization: AWS4-HMAC-SHA256 Credential=%s/%s/%s/s3/aws4_request,"
+ "SignedHeaders=host;x-amz-content-sha256;"
+ "x-amz-date;x-amz-server-side-encryption-customer-algorithm;"
+ "x-amz-server-side-encryption-customer-key;"
+ "x-amz-server-side-encryption-customer-key-md5;"
+ "x-amz-storage-class,"
+ "Signature=%s",
+ o->s3_keyid, date_short, o->s3_region, signature);
+ } else {
+ snprintf(s, sizeof(s), "Authorization: AWS4-HMAC-SHA256 Credential=%s/%s/%s/s3/aws4_request,"
+ "SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-storage-class,Signature=%s",
+ o->s3_keyid, date_short, o->s3_region, signature);
+ }
slist = curl_slist_append(slist, s);
curl_easy_setopt(curl, CURLOPT_HTTPHEADER, slist);
free(csha);
free(dsha);
free(signature);
+ if (sse_key_base64 != NULL) {
+ free(sse_key_base64);
+ free(sse_key_md5_base64);
+ }
}
static void _add_swift_header(CURL *curl, struct curl_slist *slist, struct http_options *o,
if (td->o.iodepth != 1) {
nd->io_us = (struct io_u **) malloc(td->o.iodepth * sizeof(struct io_u *));
memset(nd->io_us, 0, td->o.iodepth * sizeof(struct io_u *));
+ td->io_ops->flags |= FIO_ASYNCIO_SETS_ISSUE_TIME;
} else
td->io_ops->flags |= FIO_SYNCIO;
+ td_set_ioengine_flags(td);
return nd;
}
static void xnvme_fioe_cleanup(struct thread_data *td)
{
- struct xnvme_fioe_data *xd = td->io_ops_data;
+ struct xnvme_fioe_data *xd = NULL;
int err;
+ if (!td->io_ops_data)
+ return;
+
+ xd = td->io_ops_data;
+
err = pthread_mutex_lock(&g_serialize);
if (err)
log_err("ioeng->cleanup(): pthread_mutex_lock(), err(%d)\n", err);
/* NOTE: using the first device for buffer-allocators) */
static void xnvme_fioe_iomem_free(struct thread_data *td)
{
- struct xnvme_fioe_data *xd = td->io_ops_data;
- struct xnvme_fioe_fwrap *fwrap = &xd->files[0];
+ struct xnvme_fioe_data *xd = NULL;
+ struct xnvme_fioe_fwrap *fwrap = NULL;
+
+ if (!td->io_ops_data)
+ return;
+
+ xd = td->io_ops_data;
+ fwrap = &xd->files[0];
if (!fwrap->dev) {
log_err("ioeng->iomem_free(): failed no dev-handle\n");
--- /dev/null
+# Example test for the HTTP engine's S3 support against Amazon AWS.
+# Obviously, you have to adjust the S3 credentials; for this example,
+# they're passed in via the environment.
+# And you can set the SSE Customer Key and Algorithm to test Server
+# Side Encryption.
+#
+
+[global]
+ioengine=http
+name=test
+direct=1
+filename=/larsmb-fio-test/object
+http_verbose=0
+https=on
+http_mode=s3
+http_s3_key=${S3_KEY}
+http_s3_keyid=${S3_ID}
+http_host=s3.eu-central-1.amazonaws.com
+http_s3_region=eu-central-1
+http_s3_sse_customer_key=${SSE_KEY}
+http_s3_sse_customer_algorithm=AES256
+group_reporting
+
+# With verify, this both writes and reads the object
+[create]
+rw=write
+bs=4k
+size=64k
+io_size=4k
+verify=sha256
+
+[trim]
+stonewall
+rw=trim
+bs=4k
+size=64k
+io_size=4k
+
--- /dev/null
+# Example test for the HTTP engine's S3 support against Amazon AWS.
+# Obviously, you have to adjust the S3 credentials; for this example,
+# they're passed in via the environment.
+# Additionally, the storage class parameter is set here; use STANDARD for
+# a normal test, or another storage class for a compression test.
+#
+
+[global]
+ioengine=http
+name=test
+direct=1
+filename=/larsmb-fio-test/object
+http_verbose=0
+https=on
+http_mode=s3
+http_s3_key=${S3_KEY}
+http_s3_keyid=${S3_ID}
+http_host=s3.eu-central-1.amazonaws.com
+http_s3_region=eu-central-1
+http_s3_storage_class=${STORAGE_CLASS}
+group_reporting
+
+# With verify, this both writes and reads the object
+[create]
+rw=write
+bs=4k
+size=64k
+io_size=4k
+verify=sha256
+
+[trim]
+stonewall
+rw=trim
+bs=4k
+size=64k
+io_size=4k
+
extern void close_files(struct thread_data *);
extern void close_and_free_files(struct thread_data *);
extern uint64_t get_start_offset(struct thread_data *, struct fio_file *);
-extern int __must_check setup_shared_file(struct thread_data *);
extern int __must_check setup_files(struct thread_data *);
extern int __must_check file_invalidate_cache(struct thread_data *, struct fio_file *);
#ifdef __cplusplus
if (unlink_file || new_layout) {
int ret;
- dprint(FD_FILE, "layout %d unlink %d %s\n", new_layout, unlink_file, f->file_name);
+ dprint(FD_FILE, "layout unlink %s\n", f->file_name);
ret = td_io_unlink_file(td, f);
if (ret != 0 && ret != ENOENT) {
}
}
-
- dprint(FD_FILE, "fill file %s, size %llu\n", f->file_name, (unsigned long long) f->real_file_size);
-
left = f->real_file_size;
bs = td->o.max_bs[DDIR_WRITE];
if (bs > left)
return true;
}
-int setup_shared_file(struct thread_data *td)
-{
- struct fio_file *f;
- uint64_t file_size;
- int err = 0;
-
- if (td->o.nr_files > 1) {
- log_err("fio: shared file setup called for multiple files\n");
- return -1;
- }
-
- get_file_sizes(td);
-
- f = td->files[0];
-
- if (f == NULL) {
- log_err("fio: NULL shared file\n");
- return -1;
- }
-
- file_size = thread_number * td->o.size;
- dprint(FD_FILE, "shared setup %s real_file_size=%llu, desired=%llu\n",
- f->file_name, (unsigned long long)f->real_file_size, (unsigned long long)file_size);
-
- if (f->real_file_size < file_size) {
- dprint(FD_FILE, "fio: extending shared file\n");
- f->real_file_size = file_size;
- err = extend_file(td, f);
- if (!err)
- err = __file_invalidate_cache(td, f, 0, f->real_file_size);
- get_file_sizes(td);
- dprint(FD_FILE, "shared setup new real_file_size=%llu\n",
- (unsigned long long)f->real_file_size);
- }
-
- return err;
-}
-
/*
* Open the files and setup files sizes, creating files if necessary.
*/
const unsigned long long bs = td_min_bs(td);
uint64_t fs = 0;
- dprint(FD_FILE, "setup files (thread_number=%d, subjob_number=%d)\n", td->thread_number, td->subjob_number);
+ dprint(FD_FILE, "setup files\n");
old_state = td_bump_runstate(td, TD_SETTING_UP);
.RS
.P
.PD 0
-z means Zone
+z means Zone
.P
.PD
.RE
effectively caps the file size at `real_size \- offset'. Can be combined with
\fBsize\fR to constrain the start and end range of the I/O workload.
A percentage can be specified by a number between 1 and 100 followed by '%',
-for example, `offset=20%' to specify 20%. In ZBD mode, value can be set as
+for example, `offset=20%' to specify 20%. In ZBD mode, value can be set as
number of zones using 'z'.
.TP
.BI offset_align \fR=\fPint
intended to operate on a file in parallel disjoint segments, with even
spacing between the starting points. Percentages can be used for this option.
If a percentage is given, the generated offset will be aligned to the minimum
-\fBblocksize\fR or to the value of \fBoffset_align\fR if provided.In ZBD mode, value
+\fBblocksize\fR or to the value of \fBoffset_align\fR if provided.In ZBD mode, value
can be set as number of zones using 'z'.
.TP
.BI number_ios \fR=\fPint
files or devices. If the files do not exist, size must be given. It is also
possible to give size as a percentage between 1 and 100. If `size=20%' is
given, fio will use 20% of the full size of the given files or devices. In ZBD mode,
-size can be given in units of number of zones using 'z'. Can be combined with \fBoffset\fR to
+size can be given in units of number of zones using 'z'. Can be combined with \fBoffset\fR to
constrain the start and end range that I/O will be done within.
.TP
.BI io_size \fR=\fPint[%|z] "\fR,\fB io_limit" \fR=\fPint[%|z]
.BI filesize \fR=\fPirange(int)
Individual file sizes. May be a range, in which case fio will select sizes
for files at random within the given range. If not given, each created file
-is the same size. This option overrides \fBsize\fR in terms of file size,
+is the same size. This option overrides \fBsize\fR in terms of file size,
i.e. \fBsize\fR becomes merely the default for \fBio_size\fR (and
has no effect it all if \fBio_size\fR is set explicitly).
.TP
.BI (http)http_s3_keyid \fR=\fPstr
The S3 key/access id.
.TP
+.BI (http)http_s3_sse_customer_key \fR=\fPstr
+The encryption customer key in SSE server side.
+.TP
+.BI (http)http_s3_sse_customer_algorithm \fR=\fPstr
+The encryption customer algorithm in SSE server side. Default is \fBAES256\fR
+.TP
+.BI (http)http_s3_storage_class \fR=\fPstr
+Which storage class to access. User-customizable settings. Default is \fBSTANDARD\fR
+.TP
.BI (http)http_swift_auth_token \fR=\fPstr
The Swift auth token. See the example configuration file on how to
retrieve this.
.RS
.TP
.B emu
-This is default and used to emulate asynchronous I/O
+This is default and used to emulate asynchronous I/O by using a single thread to
+create a queue pair on top of a synchronous I/O interface using the NVMe driver
+IOCTL.
.TP
.BI thrpool
-Use thread pool for Asynchronous I/O
+Emulate an asynchronous I/O interface with a pool of userspace threads on top
+of a synchronous I/O interface using the NVMe driver IOCTL. By default four
+threads are used.
.TP
.BI io_uring
-Use Linux io_uring/liburing for Asynchronous I/O
+Linux native asynchronous I/O interface which supports both direct and buffered
+I/O.
.TP
.BI libaio
Use Linux aio for Asynchronous I/O
.TP
.BI posix
-Use POSIX aio for Asynchronous I/O
+Use the posix asynchronous I/O interface to perform one or more I/O operations
+asynchronously.
.TP
.BI nil
-Use nil-io; For introspective perf. evaluation
+Do not transfer any data; just pretend to. This is mainly used for
+introspective performance evaluation.
.RE
.RE
.TP
.RS
.TP
.B nvme
-This is default and uses Linux NVMe Driver ioctl() for synchronous I/O
+This is default and uses Linux NVMe Driver ioctl() for synchronous I/O.
.TP
.BI psync
-Use pread()/write() for synchronous I/O
+This supports regular as well as vectored pread() and pwrite() commands.
+.TP
+.BI block
+This is the same as psync except that it also supports zone management
+commands using Linux block layer IOCTLs.
.RE
.RE
.TP
.RS
.TP
.B nvme
-This is default and uses Linux NVMe Driver ioctl() for admin commands
+This is default and uses Linux NVMe Driver ioctl() for admin commands.
.TP
.BI block
-Use Linux Block Layer ioctl() and sysfs for admin commands
-.TP
-.BI file_as_ns
-Use file-stat as to construct NVMe idfy responses
+Use Linux Block Layer ioctl() and sysfs for admin commands.
.RE
.RE
.TP
.BI (xnvme)xnvme_dev_nsid\fR=\fPint
-xnvme namespace identifier, for userspace NVMe driver.
+xnvme namespace identifier for userspace NVMe driver such as SPDK.
.TP
.BI (xnvme)xnvme_iovec
If this option is set, xnvme will use vectored read/write commands.
completed. If this option is used, there are two more stats that are
appended, the total error count and the first error. The error field given
in the stats is the first error that was hit during the run.
+.RS
+.P
+Note: a write error from the device may go unnoticed by fio when using buffered
+IO, as the write() (or similar) system call merely dirties the kernel pages,
+unless `sync' or `direct' is used. Device IO errors occur when the dirty data is
+actually written out to disk. If fully sync writes aren't desirable, `fsync' or
+`fdatasync' can be used as well. This is specific to writes, as reads are always
+synchronous.
+.RS
+.P
The allowed values are:
.RS
.RS
__init_rand64(&state->state64, seed);
}
-void __fill_random_buf(void *buf, unsigned int len, uint64_t seed)
+void __fill_random_buf_small(void *buf, unsigned int len, uint64_t seed)
{
uint64_t *b = buf;
uint64_t *e = b + len / sizeof(*b);
__builtin_memcpy(e, &seed, rest);
}
+void __fill_random_buf(void *buf, unsigned int len, uint64_t seed)
+{
+ static uint64_t prime[] = {1, 2, 3, 5, 7, 11, 13, 17,
+ 19, 23, 29, 31, 37, 41, 43, 47};
+ uint64_t *b, *e, s[CONFIG_SEED_BUCKETS];
+ unsigned int rest;
+ int p;
+
+ /*
+ * Calculate the max index which is multiples of the seed buckets.
+ */
+ rest = (len / sizeof(*b) / CONFIG_SEED_BUCKETS) * CONFIG_SEED_BUCKETS;
+
+ b = buf;
+ e = b + rest;
+
+ rest = len - (rest * sizeof(*b));
+
+ for (p = 0; p < CONFIG_SEED_BUCKETS; p++)
+ s[p] = seed * prime[p];
+
+ for (; b != e; b += CONFIG_SEED_BUCKETS) {
+ for (p = 0; p < CONFIG_SEED_BUCKETS; ++p) {
+ b[p] = s[p];
+ s[p] = __hash_u64(s[p]);
+ }
+ }
+
+ __fill_random_buf_small(b, rest, s[0]);
+}
+
uint64_t fill_random_buf(struct frand_state *fs, void *buf,
unsigned int len)
{
--- /dev/null
+# Expected result: mean(slat) + mean(clat) = mean(lat)
+# Buggy result: equality does not hold
+
+[test]
+ioengine=libaio
+size=1M
+iodepth=16
--- /dev/null
+# Expected result: mean(slat) + mean(clat) = mean(lat)
+# Buggy result: equality does not hold
+
+[test]
+ioengine=null
+size=1M
+iodepth=16
--- /dev/null
+# Expected result: mean(slat) + mean(clat) = mean(lat)
+# Buggy result: equality does not hold
+# This is similar to t0015 and t0016 except that it uses posixaio which is
+# available on more platforms and does not have a commit hook
+
+[test]
+ioengine=posixaio
+size=1M
+iodepth=16
return
+class FioJobTest_t0015(FioJobTest):
+    """Test consists of fio test jobs t0015, t0016, and t0017
+    Confirm that mean(slat) + mean(clat) = mean(tlat)"""
+
+ def check_result(self):
+ super(FioJobTest_t0015, self).check_result()
+
+ if not self.passed:
+ return
+
+ slat = self.json_data['jobs'][0]['read']['slat_ns']['mean']
+ clat = self.json_data['jobs'][0]['read']['clat_ns']['mean']
+ tlat = self.json_data['jobs'][0]['read']['lat_ns']['mean']
+ logging.debug('Test %d: slat %f, clat %f, tlat %f', self.testnum, slat, clat, tlat)
+
+ if abs(slat + clat - tlat) > 1:
+ self.failure_reason = "{0} slat {1} + clat {2} = {3} != tlat {4},".format(
+ self.failure_reason, slat, clat, slat+clat, tlat)
+ self.passed = False
+
+
class FioJobTest_iops_rate(FioJobTest):
"""Test consists of fio test job t0009
Confirm that job0 iops == 1000
'output_format': 'json',
'requirements': [],
},
+ {
+ 'test_id': 15,
+ 'test_class': FioJobTest_t0015,
+ 'job': 't0015-e78980ff.fio',
+ 'success': SUCCESS_DEFAULT,
+ 'pre_job': None,
+ 'pre_success': None,
+ 'output_format': 'json',
+ 'requirements': [Requirements.linux, Requirements.libaio],
+ },
+ {
+ 'test_id': 16,
+ 'test_class': FioJobTest_t0015,
+ 'job': 't0016-d54ae22.fio',
+ 'success': SUCCESS_DEFAULT,
+ 'pre_job': None,
+ 'pre_success': None,
+ 'output_format': 'json',
+ 'requirements': [],
+ },
+ {
+ 'test_id': 17,
+ 'test_class': FioJobTest_t0015,
+ 'job': 't0017.fio',
+ 'success': SUCCESS_DEFAULT,
+ 'pre_job': None,
+ 'pre_success': None,
+ 'output_format': 'json',
+ 'requirements': [Requirements.not_windows],
+ },
{
'test_id': 1000,
'test_class': FioExeTest,