#include <unistd.h>
#include <ctype.h>
#include <string.h>
-#include <getopt.h>
#include <assert.h>
#include <libgen.h>
#include <fcntl.h>
#include "lib/fls.h"
#include "options.h"
+#include "crc/crc32c.h"
+
/*
* Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
*/
struct thread_data *td = data;
char *nr = get_opt_postfix(str);
- td->o.ddir_nr = 1;
- if (nr) {
- td->o.ddir_nr = atoi(nr);
- free(nr);
+ td->o.ddir_seq_nr = 1;
+ td->o.ddir_seq_add = 0;
+
+ if (!nr)
+ return 0;
+
+ if (td_random(td))
+ td->o.ddir_seq_nr = atoi(nr);
+ else {
+ long long val;
+
+ if (str_to_decimal(nr, &val, 1, td)) {
+ log_err("fio: rw postfix parsing failed\n");
+ free(nr);
+ return 1;
+ }
+
+ td->o.ddir_seq_add = val;
}
+ free(nr);
return 0;
}
+#ifdef FIO_HAVE_LIBAIO
+/*
+ * Parse a sub-option given to the libaio ioengine. The only value
+ * recognized here is "userspace_reap", which turns on reaping of
+ * completed events from user space (userspace_libaio_reap flag).
+ * Returns 0 on success, 1 on an unknown sub-option.
+ */
+static int str_libaio_cb(void *data, const char *str)
+{
+	struct thread_data *td = data;
+
+	if (!strcmp(str, "userspace_reap")) {
+		td->o.userspace_libaio_reap = 1;
+		return 0;
+	}
+
+	log_err("fio: bad libaio sub-option: %s\n", str);
+	return 1;
+}
+#endif
+
static int str_mem_cb(void *data, const char *mem)
{
struct thread_data *td = data;
return 0;
}
-static int str_lockmem_cb(void fio_unused *data, unsigned long *val)
+/*
+ * Post-parse hook for the "verify" option. If hardware accelerated
+ * crc32c (VERIFY_CRC32C_INTEL) was requested but crc32c_intel_works()
+ * reports the CPU cannot do it, silently fall back to the software
+ * crc32c implementation after logging an informational message.
+ * Always returns 0.
+ */
+static int str_verify_cb(void *data, const char *mem)
+{
+	struct thread_data *td = data;
+
+	if (td->o.verify != VERIFY_CRC32C_INTEL)
+		return 0;
+
+	if (!crc32c_intel_works()) {
+		log_info("fio: System does not support hw accelerated crc32c. Falling back to sw crc32c.\n");
+		td->o.verify = VERIFY_CRC32C;
+	}
+
+	return 0;
+}
+
+/*
+ * Post-parse hook for the "clocksource" option: copy the per-job
+ * choice into the global fio_clock_source and re-run fio_time_init()
+ * so the new timing source takes effect immediately. Always returns 0.
+ *
+ * NOTE(review): the global is set from the last job parsed — presumably
+ * clocksource is intended to be process-wide; confirm against callers.
+ */
+static int fio_clock_source_cb(void *data, const char *str)
+{
+	struct thread_data *td = data;
+
+	fio_clock_source = td->o.clocksource;
+	fio_time_init();
+	return 0;
+}
+
+/*
+ * Callback for the "lockmem" option: store the parsed byte value in
+ * the global mlock_size. The value type widens from unsigned long to
+ * unsigned long long to match the option parser. The thread data
+ * argument is unused (fio_unused). Always returns 0.
+ */
+static int str_lockmem_cb(void fio_unused *data, unsigned long long *val)
 {
 	mlock_size = *val;
 	return 0;
 }
-static int str_rwmix_read_cb(void *data, unsigned int *val)
+static int str_rwmix_read_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
return 0;
}
-static int str_rwmix_write_cb(void *data, unsigned int *val)
+static int str_rwmix_write_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
}
#ifdef FIO_HAVE_IOPRIO
-static int str_prioclass_cb(void *data, unsigned int *val)
+static int str_prioclass_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
unsigned short mask;
return 0;
}
-static int str_prio_cb(void *data, unsigned int *val)
+static int str_prio_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
}
#ifdef FIO_HAVE_CPU_AFFINITY
-static int str_cpumask_cb(void *data, unsigned int *val)
+static int str_cpumask_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
unsigned int i;
return 1;
}
- max_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+ max_cpu = cpus_online();
for (i = 0; i < sizeof(int) * 8; i++) {
if ((1 << i) & *val) {
strip_blank_front(&str);
strip_blank_end(str);
- max_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+ max_cpu = cpus_online();
while ((cpu = strsep(&str, ",")) != NULL) {
char *str2, *cpu2;
}
#endif
+#ifdef FIO_HAVE_TRIM
+/*
+ * Callback for the "trim_percentage" option: record what percentage of
+ * verify blocks should be discarded/trimmed (range-checked to 0..100
+ * by the option table's maxval). Always returns 0.
+ */
+static int str_verify_trim_cb(void *data, unsigned long long *val)
+{
+	struct thread_data *td = data;
+
+	td->o.trim_percentage = *val;
+	return 0;
+}
+#endif
+
static int str_fst_cb(void *data, const char *str)
{
struct thread_data *td = data;
static int check_dir(struct thread_data *td, char *fname)
{
+#if 0
char file[PATH_MAX], *dir;
int elen = 0;
sprintf(file + elen, "%s", fname);
dir = dirname(file);
-#if 0
{
struct stat sb;
/*
return add_dir_files(td, td->o.opendir);
}
-static int str_verify_offset_cb(void *data, unsigned int *off)
+static int str_verify_offset_cb(void *data, unsigned long long *off)
{
struct thread_data *td = data;
}
}
td->o.verify_pattern_bytes = i;
+ /*
+ * VERIFY_META could already be set
+ */
+ if (td->o.verify == VERIFY_NONE)
+ td->o.verify = VERIFY_PATTERN;
return 0;
}
struct thread_data *td = data;
int val = *il;
+ td->o.disable_lat = !!val;
td->o.disable_clat = !!val;
td->o.disable_slat = !!val;
td->o.disable_bw = !!val;
return 0;
}
-static int str_gtod_cpu_cb(void *data, int *il)
+static int str_gtod_cpu_cb(void *data, long long *il)
{
struct thread_data *td = data;
int val = *il;
return 0;
}
+/*
+ * Callback for the "size" option. parse_is_percent() flags values the
+ * option parser stored as a percentage; such values are decoded here as
+ * (-1ULL - v) into size_percent, with the absolute size cleared.
+ * Otherwise the value is taken as an absolute byte size. Always
+ * returns 0.
+ *
+ * NOTE(review): "__val" is a reserved identifier in C (double leading
+ * underscore) — consider renaming in a follow-up.
+ */
+static int str_size_cb(void *data, unsigned long long *__val)
+{
+	struct thread_data *td = data;
+	unsigned long long v = *__val;
+
+	if (parse_is_percent(v)) {
+		td->o.size = 0;
+		td->o.size_percent = -1ULL - v;
+	} else
+		td->o.size = v;
+
+	return 0;
+}
+
static int rw_verify(struct fio_option *o, void *data)
{
struct thread_data *td = data;
},
},
},
+ {
+ .name = "rw_sequencer",
+ .type = FIO_OPT_STR,
+ .off1 = td_var_offset(rw_seq),
+ .help = "IO offset generator modifier",
+ .def = "sequential",
+ .posval = {
+ { .ival = "sequential",
+ .oval = RW_SEQ_SEQ,
+ .help = "Generate sequential offsets",
+ },
+ { .ival = "identical",
+ .oval = RW_SEQ_IDENT,
+ .help = "Generate identical offsets",
+ },
+ },
+ },
+
{
.name = "ioengine",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(ioengine),
.help = "IO engine to use",
- .def = "sync",
+ .def = FIO_PREFERRED_ENGINE,
.posval = {
{ .ival = "sync",
.help = "Use read/write",
.help = "Use pread/pwrite",
},
{ .ival = "vsync",
- .help = "Use readv/writev",
+ .help = "Use readv/writev",
},
#ifdef FIO_HAVE_LIBAIO
{ .ival = "libaio",
.help = "Linux native asynchronous IO",
+ .cb = str_libaio_cb,
},
#endif
#ifdef FIO_HAVE_POSIXAIO
{ .ival = "solarisaio",
.help = "Solaris native asynchronous IO",
},
+#endif
+#ifdef FIO_HAVE_WINDOWSAIO
+ { .ival = "windowsaio",
+ .help = "Windows native asynchronous IO"
+ },
#endif
{ .ival = "mmap",
- .help = "Memory mapped IO",
+ .help = "Memory mapped IO"
},
#ifdef FIO_HAVE_SPLICE
{ .ival = "splice",
},
#endif
{ .ival = "cpuio",
- .help = "CPU cycler burner engine",
+ .help = "CPU cycle burner engine",
},
#ifdef FIO_HAVE_GUASI
{ .ival = "guasi",
.help = "GUASI IO engine",
},
+#endif
+#ifdef FIO_HAVE_BINJECT
+ { .ival = "binject",
+ .help = "binject direct inject block engine",
+ },
+#endif
+#ifdef FIO_HAVE_RDMA
+ { .ival = "rdma",
+ .help = "RDMA IO engine",
+ },
#endif
{ .ival = "external",
.help = "Load external engine (append name)",
.name = "iodepth",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth),
- .help = "Amount of IO buffers to keep in flight",
+ .help = "Number of IO buffers to keep in flight",
.minval = 1,
.def = "1",
},
{
.name = "size",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(size),
- .minval = 1,
+ .cb = str_size_cb,
.help = "Total size of device or files",
},
{
.name = "fill_device",
+ .alias = "fill_fs",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(fill_device),
.help = "Write until an ENOSPC error occurs",
.def = "1",
.parent = "rw",
},
+ {
+ .name = "use_os_rand",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(use_os_rand),
+ .help = "Set to use OS random generator",
+ .def = "0",
+ .parent = "rw",
+ },
{
.name = "norandommap",
.type = FIO_OPT_STR_SET,
},
{
.name = "nrfiles",
+ .alias = "nr_files",
.type = FIO_OPT_INT,
.off1 = td_var_offset(nr_files),
.help = "Split job workload between this number of files",
#ifdef FIO_HAVE_FALLOCATE
{
.name = "fallocate",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(fallocate),
- .help = "Use fallocate() when laying out files",
- .def = "1",
- },
+ .type = FIO_OPT_STR,
+ .off1 = td_var_offset(fallocate_mode),
+ .help = "Whether pre-allocation is performed when laying out files",
+ .def = "posix",
+ .posval = {
+ { .ival = "none",
+ .oval = FIO_FALLOCATE_NONE,
+ .help = "Do not pre-allocate space",
+ },
+ { .ival = "posix",
+ .oval = FIO_FALLOCATE_POSIX,
+ .help = "Use posix_fallocate()",
+ },
+#ifdef FIO_HAVE_LINUX_FALLOCATE
+ { .ival = "keep",
+ .oval = FIO_FALLOCATE_KEEP_SIZE,
+ .help = "Use fallocate(..., FALLOC_FL_KEEP_SIZE, ...)",
+ },
#endif
+ /* Compatibility with former boolean values */
+ { .ival = "0",
+ .oval = FIO_FALLOCATE_NONE,
+ .help = "Alias for 'none'",
+ },
+ { .ival = "1",
+ .oval = FIO_FALLOCATE_POSIX,
+ .help = "Alias for 'posix'",
+ },
+ },
+ },
+#endif /* FIO_HAVE_FALLOCATE */
{
.name = "fadvise_hint",
.type = FIO_OPT_BOOL,
.help = "Issue fdatasync for writes every given number of blocks",
.def = "0",
},
+ {
+ .name = "write_barrier",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(barrier_blocks),
+ .help = "Make every Nth write a barrier write",
+ .def = "0",
+ },
#ifdef FIO_HAVE_SYNC_FILE_RANGE
{
.name = "sync_file_range",
},
{
.name = "startdelay",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(start_delay),
.help = "Only start job when this period has passed",
.def = "0",
.off1 = td_var_offset(ramp_time),
.help = "Ramp up time before measuring performance",
},
+ {
+ .name = "clocksource",
+ .type = FIO_OPT_STR,
+ .cb = fio_clock_source_cb,
+ .off1 = td_var_offset(clocksource),
+ .help = "What type of timing source to use",
+ .posval = {
+ { .ival = "gettimeofday",
+ .oval = CS_GTOD,
+ .help = "Use gettimeofday(2) for timing",
+ },
+ { .ival = "clock_gettime",
+ .oval = CS_CGETTIME,
+ .help = "Use clock_gettime(2) for timing",
+ },
+#ifdef ARCH_HAVE_CPU_CLOCK
+ { .ival = "cpu",
+ .oval = CS_CPUCLOCK,
+ .help = "Use CPU private clock",
+ },
+#endif
+ },
+ },
{
.name = "mem",
.alias = "iomem",
.type = FIO_OPT_STR,
.off1 = td_var_offset(verify),
.help = "Verify data written",
+ .cb = str_verify_cb,
.def = "0",
.posval = {
{ .ival = "0",
.help = "Exit on a single verify failure, don't continue",
.parent = "verify",
},
+ {
+ .name = "verify_dump",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(verify_dump),
+ .def = "1",
+ .help = "Dump contents of good and bad blocks on failure",
+ .parent = "verify",
+ },
{
.name = "verify_async",
.type = FIO_OPT_INT,
.help = "Number of async verifier threads to use",
.parent = "verify",
},
+ {
+ .name = "verify_backlog",
+ .type = FIO_OPT_STR_VAL,
+ .off1 = td_var_offset(verify_backlog),
+ .help = "Verify after this number of blocks are written",
+ .parent = "verify",
+ },
+ {
+ .name = "verify_backlog_batch",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(verify_batch),
+ .help = "Verify this number of IO blocks",
+ .parent = "verify",
+ },
#ifdef FIO_HAVE_CPU_AFFINITY
{
.name = "verify_async_cpus",
.help = "Set CPUs allowed for async verify threads",
.parent = "verify_async",
},
+#endif
+#ifdef FIO_HAVE_TRIM
+ {
+ .name = "trim_percentage",
+ .type = FIO_OPT_INT,
+ .cb = str_verify_trim_cb,
+ .maxval = 100,
+ .help = "Number of verify blocks to discard/trim",
+ .parent = "verify",
+ .def = "0",
+ },
+ {
+ .name = "trim_verify_zero",
+ .type = FIO_OPT_INT,
+ .help = "Verify that trim/discarded blocks are returned as zeroes",
+ .off1 = td_var_offset(trim_zero),
+ .parent = "trim_percentage",
+ .def = "1",
+ },
+ {
+ .name = "trim_backlog",
+ .type = FIO_OPT_STR_VAL,
+ .off1 = td_var_offset(trim_backlog),
+ .help = "Trim after this number of blocks are written",
+ .parent = "trim_percentage",
+ },
+ {
+ .name = "trim_backlog_batch",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(trim_batch),
+ .help = "Trim this number of IO blocks",
+ .parent = "trim_percentage",
+ },
#endif
{
.name = "write_iolog",
.off1 = td_var_offset(read_iolog_file),
.help = "Playback IO pattern from file",
},
+ {
+ .name = "replay_no_stall",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(no_stall),
+ .def = "0",
+ .parent = "read_iolog",
+ .help = "Playback IO pattern file as fast as possible without stalls",
+ },
+ {
+ .name = "replay_redirect",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(replay_redirect),
+ .parent = "read_iolog",
+ .help = "Replay all I/O onto this device, regardless of trace device",
+ },
{
.name = "exec_prerun",
.type = FIO_OPT_STR_STORE,
.type = FIO_OPT_INT,
.off1 = td_var_offset(rate_iops_min[0]),
.off2 = td_var_offset(rate_iops_min[1]),
- .help = "Job must meet this rate or it will be shutdown",
+ .help = "Job must meet this rate or it will be shut down",
.parent = "rate_iops",
},
{
.name = "create_fsync",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(create_fsync),
- .help = "Fsync file after creation",
+ .help = "fsync file after creation",
.def = "1",
},
{
.name = "pre_read",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(pre_read),
- .help = "Preread files before starting official testing",
+ .help = "Pre-read files before starting official testing",
.def = "0",
},
{
},
{
.name = "stonewall",
+ .alias = "wait_for_previous",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(stonewall),
.help = "Insert a hard barrier between this job and previous",
.off1 = td_var_offset(refill_buffers),
.help = "Refill IO buffers on every IO submit",
},
+ {
+ .name = "clat_percentiles",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(clat_percentiles),
+ .help = "Enable the reporting of completion latency percentiles",
+ .def = "0",
+ },
+ {
+ .name = "percentile_list",
+ .type = FIO_OPT_FLOAT_LIST,
+ .off1 = td_var_offset(percentile_list),
+ .off2 = td_var_offset(overwrite_plist),
+ .help = "Specify a custom list of percentiles to report",
+ .maxlen = FIO_IO_U_LIST_MAX_LEN,
+ .minfp = 0.0,
+ .maxfp = 100.0,
+ },
+
#ifdef FIO_HAVE_DISK_UTIL
{
.name = "disk_util",
.cb = str_gtod_reduce_cb,
.def = "0",
},
+ {
+ .name = "disable_lat",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(disable_lat),
+ .help = "Disable latency numbers",
+ .parent = "gtod_reduce",
+ .def = "0",
+ },
{
.name = "disable_clat",
.type = FIO_OPT_BOOL,
.name = "disable_slat",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(disable_slat),
- .help = "Disable submissionn latency numbers",
+ .help = "Disable submission latency numbers",
.parent = "gtod_reduce",
.def = "0",
},
.name = "gtod_cpu",
.type = FIO_OPT_INT,
.cb = str_gtod_cpu_cb,
- .help = "Setup dedicated gettimeofday() thread on this CPU",
+ .help = "Set up dedicated gettimeofday() thread on this CPU",
.verify = gtod_cpu_verify,
},
{
.name = "continue_on_error",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(continue_on_error),
- .help = "Continue on non-fatal errors during I/O",
+ .help = "Continue on non-fatal errors during IO",
.def = "0",
},
{
.minval = 100,
.maxval = 1000,
},
+ {
+ .name = "cgroup_nodelete",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(cgroup_nodelete),
+ .help = "Do not delete cgroups after job completion",
+ .def = "0",
+ },
{
.name = "uid",
.type = FIO_OPT_INT,
},
};
-static void add_to_lopt(struct option *lopt, struct fio_option *o)
+static void add_to_lopt(struct option *lopt, struct fio_option *o,
+ const char *name)
{
- lopt->name = (char *) o->name;
+ lopt->name = (char *) name;
lopt->val = FIO_GETOPT_JOB;
if (o->type == FIO_OPT_STR_SET)
lopt->has_arg = no_argument;
o = &options[0];
while (o->name) {
- add_to_lopt(&long_options[i], o);
+ add_to_lopt(&long_options[i], o, o->name);
+ if (o->alias) {
+ i++;
+ add_to_lopt(&long_options[i], o, o->alias);
+ }
i++;
o++;
sprintf(buf, "%lu", page_size);
fio_keywords[0].replace = strdup(buf);
- mb_memory = os_phys_mem() / page_size;
+ mb_memory = os_phys_mem() / (1024 * 1024);
sprintf(buf, "%llu", mb_memory);
fio_keywords[1].replace = strdup(buf);
- l = sysconf(_SC_NPROCESSORS_ONLN);
+ l = cpus_online();
sprintf(buf, "%lu", l);
fio_keywords[2].replace = strdup(buf);
}