#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <netinet/in.h>
#include "fio.h"
#include "verify.h"
#include "parse.h"
#include "lib/fls.h"
+#include "lib/pattern.h"
#include "options.h"
#include "crc/crc32c.h"
+char client_sockaddr_str[INET6_ADDRSTRLEN] = { 0 };
+
+struct pattern_fmt_desc fmt_desc[] = {
+ {
+ .fmt = "%o",
+ .len = FIELD_SIZE(struct io_u *, offset),
+ .paste = paste_blockoff
+ }
+};
+
/*
* Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
*/
return strdup(p);
}
-static int converthexchartoint(char a)
-{
- int base;
-
- switch (a) {
- case '0'...'9':
- base = '0';
- break;
- case 'A'...'F':
- base = 'A' - 10;
- break;
- case 'a'...'f':
- base = 'a' - 10;
- break;
- default:
- base = 0;
- }
- return a - base;
-}
-
static int bs_cmp(const void *p1, const void *p2)
{
const struct bssplit *bsp1 = p1;
if (perc > 100)
perc = 100;
else if (!perc)
- perc = -1;
+ perc = -1U;
} else
- perc = -1;
+ perc = -1U;
- if (str_to_decimal(fname, &val, 1, o, 0)) {
+ if (str_to_decimal(fname, &val, 1, o, 0, 0)) {
log_err("fio: bssplit conversion failed\n");
free(bssplit);
return 1;
for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &bssplit[i];
- if (bsp->perc == (unsigned char) -1)
+ if (bsp->perc == -1U)
perc_missing++;
else
perc += bsp->perc;
}
- if (perc > 100) {
+ if (perc > 100 && perc_missing > 1) {
log_err("fio: bssplit percentages add to more than 100%%\n");
free(bssplit);
return 1;
}
+
/*
* If values didn't have a percentage set, divide the remains between
* them.
*/
if (perc_missing) {
+ if (perc_missing == 1 && o->bssplit_nr[ddir] == 1)
+ perc = 100;
for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &bssplit[i];
- if (bsp->perc == (unsigned char) -1)
+ if (bsp->perc == -1U)
bsp->perc = (100 - perc) / perc_missing;
}
}
ret = bssplit_ddir(&td->o, DDIR_TRIM, op);
free(op);
}
- ret = bssplit_ddir(&td->o, DDIR_READ, str);
+ if (!ret)
+ ret = bssplit_ddir(&td->o, DDIR_READ, str);
}
free(p);
} else {
error[i] = atoi(fname);
if (error[i] < 0)
- error[i] = error[i];
+ error[i] = -error[i];
}
if (!error[i]) {
log_err("Unknown error %s, please use number value \n",
td->o.continue_on_error |= 1 << etype;
td->o.ignore_error_nr[etype] = i;
td->o.ignore_error[etype] = error;
- }
+ } else
+ free(error);
+
return 0;
}
{
struct thread_data *td = data;
struct thread_options *o = &td->o;
- char *nr = get_opt_postfix(str);
+ char *nr;
if (parse_dryrun())
return 0;
o->ddir_seq_nr = 1;
o->ddir_seq_add = 0;
+ nr = get_opt_postfix(str);
if (!nr)
return 0;
else {
long long val;
- if (str_to_decimal(nr, &val, 1, o, 0)) {
+ if (str_to_decimal(nr, &val, 1, o, 0, 0)) {
log_err("fio: rw postfix parsing failed\n");
free(nr);
return 1;
{
struct thread_data *td = data;
- if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP)
+ if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP ||
+ td->o.mem_type == MEM_MMAPSHARED)
td->o.mmapfile = get_opt_postfix(mem);
return 0;
}
#ifdef FIO_HAVE_CPU_AFFINITY
+/*
+ * Restrict *mask to a single CPU, chosen for thread cpu_index by
+ * round-robin over the CPUs currently set in the mask. Returns the
+ * number of CPUs left set in the mask (expected to be 1).
+ */
+int fio_cpus_split(os_cpu_mask_t *mask, unsigned int cpu_index)
+{
+ unsigned int i, index, cpus_in_mask;
+ const long max_cpu = cpus_online();
+
+ /* NOTE(review): assumes the mask is non-empty -- cpus_in_mask == 0
+  * would divide by zero here; confirm callers guarantee that. */
+ cpus_in_mask = fio_cpu_count(mask);
+ cpu_index = cpu_index % cpus_in_mask;
+
+ index = 0;
+ for (i = 0; i < max_cpu; i++) {
+ if (!fio_cpu_isset(mask, i))
+ continue;
+
+ /* clear every set CPU except the cpu_index'th one */
+ if (cpu_index != index)
+ fio_cpu_clear(mask, i);
+
+ index++;
+ }
+
+ return fio_cpu_count(mask);
+}
+
static int str_cpumask_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
for (i = 0; i < sizeof(int) * 8; i++) {
if ((1 << i) & *val) {
- if (i > max_cpu) {
+ if (i >= max_cpu) {
log_err("fio: CPU %d too large (max=%ld)\n", i,
- max_cpu);
+ max_cpu - 1);
return 1;
}
dprint(FD_PARSE, "set cpu allowed %d\n", i);
}
}
- td->o.cpumask_set = 1;
return 0;
}
ret = 1;
break;
}
- if (icpu > max_cpu) {
+ if (icpu >= max_cpu) {
log_err("fio: CPU %d too large (max=%ld)\n",
- icpu, max_cpu);
+ icpu, max_cpu - 1);
ret = 1;
break;
}
}
free(p);
- if (!ret)
- td->o.cpumask_set = 1;
return ret;
}
static int str_cpus_allowed_cb(void *data, const char *input)
{
struct thread_data *td = data;
- int ret;
if (parse_dryrun())
return 0;
- ret = set_cpus_allowed(td, &td->o.cpumask, input);
- if (!ret)
- td->o.cpumask_set = 1;
-
- return ret;
+ return set_cpus_allowed(td, &td->o.cpumask, input);
}
static int str_verify_cpus_allowed_cb(void *data, const char *input)
{
struct thread_data *td = data;
- int ret;
- ret = set_cpus_allowed(td, &td->o.verify_cpumask, input);
- if (!ret)
- td->o.verify_cpumask_set = 1;
+ if (parse_dryrun())
+ return 0;
- return ret;
+ return set_cpus_allowed(td, &td->o.verify_cpumask, input);
}
+
+/*
+ * Option callback for "log_compression_cpus": parse the CPU list in
+ * 'input' into the log compression CPU mask. Returns 0 on success,
+ * non-zero on parse failure (via set_cpus_allowed()).
+ */
+static int str_log_cpus_allowed_cb(void *data, const char *input)
+{
+ struct thread_data *td = data;
+
+ /* option parsing dry-run: validate nothing, touch nothing */
+ if (parse_dryrun())
+ return 0;
+
+ return set_cpus_allowed(td, &td->o.log_gz_cpumask, input);
+}
+
#endif
#ifdef CONFIG_LIBNUMA
static int str_numa_cpunodes_cb(void *data, char *input)
{
struct thread_data *td = data;
+ struct bitmask *verify_bitmask;
if (parse_dryrun())
return 0;
* numa_allocate_nodemask(), so it should be freed by
* numa_free_nodemask().
*/
- td->o.numa_cpunodesmask = numa_parse_nodestring(input);
- if (td->o.numa_cpunodesmask == NULL) {
+ verify_bitmask = numa_parse_nodestring(input);
+ if (verify_bitmask == NULL) {
log_err("fio: numa_parse_nodestring failed\n");
td_verror(td, 1, "str_numa_cpunodes_cb");
return 1;
}
+ numa_free_nodemask(verify_bitmask);
- td->o.numa_cpumask_set = 1;
+ td->o.numa_cpunodes = strdup(input);
return 0;
}
{ "default", "prefer", "bind", "interleave", "local", NULL };
int i;
char *nodelist;
+ struct bitmask *verify_bitmask;
if (parse_dryrun())
return 0;
break;
case MPOL_INTERLEAVE:
case MPOL_BIND:
- td->o.numa_memnodesmask = numa_parse_nodestring(nodelist);
- if (td->o.numa_memnodesmask == NULL) {
+ verify_bitmask = numa_parse_nodestring(nodelist);
+ if (verify_bitmask == NULL) {
log_err("fio: numa_parse_nodestring failed\n");
td_verror(td, 1, "str_numa_memnodes_cb");
return 1;
}
+ td->o.numa_memnodes = strdup(nodelist);
+ numa_free_nodemask(verify_bitmask);
+
break;
case MPOL_LOCAL:
case MPOL_DEFAULT:
break;
}
- td->o.numa_memmask_set = 1;
return 0;
-
out:
return 1;
}
return 0;
if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
- val = 1.1;
+ val = FIO_DEF_ZIPF;
else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
- val = 0.2;
+ val = FIO_DEF_PARETO;
+ else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
+ val = 0.0;
else
return 0;
nr = get_opt_postfix(str);
- if (nr && !str_to_float(nr, &val)) {
+ if (nr && !str_to_float(nr, &val, 0)) {
log_err("fio: random postfix parsing failed\n");
free(nr);
return 1;
return 1;
}
td->o.zipf_theta.u.f = val;
- } else {
+ } else if (td->o.random_distribution == FIO_RAND_DIST_PARETO) {
if (val <= 0.00 || val >= 1.00) {
log_err("fio: pareto input out of range (0 < input < 1.0)\n");
return 1;
}
td->o.pareto_h.u.f = val;
+ } else {
+ if (val <= 0.00 || val >= 100.0) {
+ log_err("fio: normal deviation out of range (0 < input < 100.0)\n");
+ return 1;
+ }
+ td->o.gauss_dev.u.f = val;
}
return 0;
* Returns the directory at the index, indexes > entires will be
* assigned via modulo division of the index
*/
-int set_name_idx(char *target, char *input, int index)
+int set_name_idx(char *target, size_t tlen, char *input, int index)
{
unsigned int cur_idx;
int len;
for (cur_idx = 0; cur_idx <= index; cur_idx++)
fname = get_next_name(&str);
- len = sprintf(target, "%s/", fname);
+ if (client_sockaddr_str[0]) {
+ len = snprintf(target, tlen, "%s/%s.", fname,
+ client_sockaddr_str);
+ } else
+ len = snprintf(target, tlen, "%s/", fname);
+
+ target[tlen - 1] = '\0';
free(p);
return len;
while ((fname = get_next_name(&str)) != NULL) {
if (!strlen(fname))
break;
- add_file(td, fname, 0);
- td->o.nr_files++;
+ add_file(td, fname, 0, 1);
}
free(p);
return ret;
}
-static int str_lockfile_cb(void *data, const char fio_unused *str)
-{
- struct thread_data *td = data;
-
- if (td->files_index) {
- log_err("fio: lockfile= option must precede filename=\n");
- return 1;
- }
-
- return 0;
-}
-
static int str_opendir_cb(void *data, const char fio_unused *str)
{
struct thread_data *td = data;
return add_dir_files(td, td->o.opendir);
}
-static int pattern_cb(char *pattern, unsigned int max_size,
- const char *input, unsigned int *pattern_bytes)
+static int str_buffer_pattern_cb(void *data, const char *input)
{
- long off;
- int i = 0, j = 0, len, k, base = 10;
- uint32_t pattern_length;
- char *loc1, *loc2;
-
- loc1 = strstr(input, "0x");
- loc2 = strstr(input, "0X");
- if (loc1 || loc2)
- base = 16;
- off = strtol(input, NULL, base);
- if (off != LONG_MAX || errno != ERANGE) {
- while (off) {
- pattern[i] = off & 0xff;
- off >>= 8;
- i++;
- }
- } else {
- len = strlen(input);
- k = len - 1;
- if (base == 16) {
- if (loc1)
- j = loc1 - input + 2;
- else
- j = loc2 - input + 2;
- } else
- return 1;
- if (len - j < max_size * 2) {
- while (k >= j) {
- off = converthexchartoint(input[k--]);
- if (k >= j)
- off += (converthexchartoint(input[k--])
- * 16);
- pattern[i++] = (char) off;
- }
- }
- }
-
- /*
- * Fill the pattern all the way to the end. This greatly reduces
- * the number of memcpy's we have to do when verifying the IO.
- */
- pattern_length = i;
- while (i > 1 && i * 2 <= max_size) {
- memcpy(&pattern[i], &pattern[0], i);
- i *= 2;
- }
-
- /*
- * Fill remainder, if the pattern multiple ends up not being
- * max_size.
- */
- while (i > 1 && i < max_size) {
- unsigned int b = min(pattern_length, max_size - i);
+ struct thread_data *td = data;
+ int ret;
- memcpy(&pattern[i], &pattern[0], b);
- i += b;
- }
+ /* FIXME: for now buffer pattern does not support formats */
+ ret = parse_and_fill_pattern(input, strlen(input), td->o.buffer_pattern,
+ MAX_PATTERN_SIZE, NULL, 0, NULL, NULL);
+ if (ret < 0)
+ return 1;
- if (i == 1) {
- /*
- * The code in verify_io_u_pattern assumes a single byte pattern
- * fills the whole verify pattern buffer.
- */
- memset(pattern, pattern[0], max_size);
- }
+ assert(ret != 0);
+ td->o.buffer_pattern_bytes = ret;
+ if (!td->o.compress_percentage)
+ td->o.refill_buffers = 0;
+ td->o.scramble_buffers = 0;
+ td->o.zero_buffers = 0;
- *pattern_bytes = i;
return 0;
}
-static int str_buffer_pattern_cb(void *data, const char *input)
+static int str_buffer_compress_cb(void *data, unsigned long long *il)
{
struct thread_data *td = data;
- int ret;
- ret = pattern_cb(td->o.buffer_pattern, MAX_PATTERN_SIZE, input,
- &td->o.buffer_pattern_bytes);
+ td->flags |= TD_F_COMPRESS;
+ td->o.compress_percentage = *il;
+ return 0;
+}
- if (!ret) {
- td->o.refill_buffers = 0;
- td->o.scramble_buffers = 0;
- td->o.zero_buffers = 0;
- }
+static int str_dedupe_cb(void *data, unsigned long long *il)
+{
+ struct thread_data *td = data;
- return ret;
+ td->flags |= TD_F_COMPRESS;
+ td->o.dedupe_percentage = *il;
+ td->o.refill_buffers = 1;
+ return 0;
}
static int str_verify_pattern_cb(void *data, const char *input)
struct thread_data *td = data;
int ret;
- ret = pattern_cb(td->o.verify_pattern, MAX_PATTERN_SIZE, input,
- &td->o.verify_pattern_bytes);
+ td->o.verify_fmt_sz = ARRAY_SIZE(td->o.verify_fmt);
+ ret = parse_and_fill_pattern(input, strlen(input), td->o.verify_pattern,
+ MAX_PATTERN_SIZE, fmt_desc, sizeof(fmt_desc),
+ td->o.verify_fmt, &td->o.verify_fmt_sz);
+ if (ret < 0)
+ return 1;
+ assert(ret != 0);
+ td->o.verify_pattern_bytes = ret;
/*
- * VERIFY_META could already be set
+ * VERIFY_* could already be set
*/
- if (!ret && td->o.verify == VERIFY_NONE)
+ if (!fio_option_is_set(&td->o, verify))
td->o.verify = VERIFY_PATTERN;
- return ret;
+ return 0;
}
static int str_gtod_reduce_cb(void *data, int *il)
return 0;
}
-static int str_gtod_cpu_cb(void *data, long long *il)
-{
- struct thread_data *td = data;
- int val = *il;
-
- td->o.gtod_cpu = val;
- td->o.gtod_offload = 1;
- return 0;
-}
-
static int str_size_cb(void *data, unsigned long long *__val)
{
struct thread_data *td = data;
.name = "Tiobench profile",
.mask = FIO_OPT_G_TIOBENCH,
},
+ {
+ .name = "MTD",
+ .mask = FIO_OPT_G_MTD,
+ },
{
.name = NULL,
.parent = "filename",
.hide = 0,
.def = "none",
- .cb = str_lockfile_cb,
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_FILENAME,
.posval = {
.oval = TD_DDIR_RANDRW,
.help = "Random read and write mix"
},
+ { .ival = "trimwrite",
+ .oval = TD_DDIR_TRIMWRITE,
+ .help = "Trim and write mix, trims preceding writes"
+ },
},
},
{
{ .ival = "falloc",
.help = "fallocate() file based engine",
},
+#endif
+#ifdef CONFIG_GFAPI
+ { .ival = "gfapi",
+ .help = "Glusterfs libgfapi(sync) based engine"
+ },
+ { .ival = "gfapi_async",
+ .help = "Glusterfs libgfapi(async) based engine"
+ },
+#endif
+#ifdef CONFIG_LIBHDFS
+ { .ival = "libhdfs",
+ .help = "Hadoop Distributed Filesystem (HDFS) engine"
+ },
#endif
{ .ival = "external",
.help = "Load external engine (append name)",
.group = FIO_OPT_G_IO_BASIC,
},
{
- .name = "iodepth_batch_complete",
- .lname = "IO Depth batch complete",
+ .name = "iodepth_batch_complete_min",
+ .lname = "Min IO depth batch complete",
+ .alias = "iodepth_batch_complete",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth_batch_complete),
- .help = "Number of IO buffers to retrieve in one go",
+ .off1 = td_var_offset(iodepth_batch_complete_min),
+ .help = "Min number of IO buffers to retrieve in one go",
.parent = "iodepth",
.hide = 1,
.minval = 0,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
+ {
+ .name = "iodepth_batch_complete_max",
+ .lname = "Max IO depth batch complete",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(iodepth_batch_complete_max),
+ .help = "Max number of IO buffers to retrieve in one go",
+ .parent = "iodepth",
+ .hide = 1,
+ .minval = 0,
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
+ },
{
.name = "iodepth_low",
.lname = "IO Depth batch low",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
+ {
+ .name = "io_submit_mode",
+ .lname = "IO submit mode",
+ .type = FIO_OPT_STR,
+ .off1 = td_var_offset(io_submit_mode),
+ .help = "How IO submissions and completions are done",
+ .def = "inline",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
+ .posval = {
+ { .ival = "inline",
+ .oval = IO_MODE_INLINE,
+ .help = "Submit and complete IO inline",
+ },
+ { .ival = "offload",
+ .oval = IO_MODE_OFFLOAD,
+ .help = "Offload submit and complete to threads",
+ },
+ },
+ },
{
.name = "size",
.lname = "Size",
.type = FIO_OPT_STR_VAL,
.cb = str_size_cb,
+ .off1 = td_var_offset(size),
.help = "Total size of device or files",
.interval = 1024 * 1024,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "io_size",
+ .alias = "io_limit",
+ .lname = "IO Size",
+ .type = FIO_OPT_STR_VAL,
+ .off1 = td_var_offset(io_limit),
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "fill_device",
.lname = "Fill device",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "file_append",
+ .lname = "File append",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(file_append),
+ .help = "IO will start at the end of the file(s)",
+ .def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "offset",
.lname = "IO offset",
.lname = "Number of IOs to perform",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(number_ios),
- .help = "Force job completion of this number of IOs",
+ .help = "Force job completion after this number of IOs",
.def = "0",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
.lname = "Block size split",
.type = FIO_OPT_STR,
.cb = str_bssplit_cb,
+ .off1 = td_var_offset(bssplit),
.help = "Set a specific mix of block sizes",
.parent = "rw",
.hide = 1,
.lname = "Block size division is seq/random (not read/write)",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(bs_is_seq_rand),
- .help = "Consider any blocksize setting to be sequential,ramdom",
+ .help = "Consider any blocksize setting to be sequential,random",
.def = "0",
.parent = "blocksize",
.category = FIO_OPT_C_IO,
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(rand_seed),
.help = "Set the random generator seed value",
+ .def = "0x89",
.parent = "rw",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
{
.name = "use_os_rand",
.lname = "Use OS random",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(use_os_rand),
- .help = "Set to use OS random generator",
- .def = "0",
- .parent = "rw",
- .hide = 1,
+ .type = FIO_OPT_DEPRECATED,
+ .off1 = td_var_offset(dep_use_os_rand),
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
.oval = FIO_RAND_GEN_LFSR,
.help = "Variable length LFSR",
},
+ {
+ .ival = "tausworthe64",
+ .oval = FIO_RAND_GEN_TAUSWORTHE64,
+ .help = "64-bit Tausworthe variant",
+ },
},
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
.oval = FIO_RAND_DIST_PARETO,
.help = "Pareto distribution",
},
+ { .ival = "normal",
+ .oval = FIO_RAND_DIST_GAUSS,
+ .help = "Normal (gaussian) distribution",
+ },
},
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
+#ifdef FIO_HAVE_STREAMID
+ {
+ .name = "fadvise_stream",
+ .lname = "Fadvise stream",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(fadvise_stream),
+ .help = "Use fadvise() to set stream ID",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
+ },
+#endif
{
.name = "fsync",
.lname = "Fsync",
.help = "Only start job when this period has passed",
.def = "0",
.is_seconds = 1,
+ .is_time = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
.help = "Stop workload when this amount of time has passed",
.def = "0",
.is_seconds = 1,
+ .is_time = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
.off1 = td_var_offset(ramp_time),
.help = "Ramp up time before measuring performance",
.is_seconds = 1,
+ .is_time = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
},
.oval = MEM_MALLOC,
.help = "Use malloc(3) for IO buffers",
},
+#ifndef CONFIG_NO_SHM
{ .ival = "shm",
.oval = MEM_SHM,
.help = "Use shared memory segments for IO buffers",
.oval = MEM_SHMHUGE,
.help = "Like shm, but use huge pages",
},
+#endif
#endif
{ .ival = "mmap",
.oval = MEM_MMAP,
.help = "Use mmap(2) (file or anon) for IO buffers",
},
+ { .ival = "mmapshared",
+ .oval = MEM_MMAPSHARED,
+ .help = "Like mmap, but use the shared flag",
+ },
#ifdef FIO_HAVE_HUGETLB
{ .ival = "mmaphuge",
.oval = MEM_MMAPHUGE,
.oval = VERIFY_XXHASH,
.help = "Use xxhash checksums for verification",
},
+ /* Meta information is now included in the verify_header, so
+ * 'meta' verification is implied by default. */
{ .ival = "meta",
- .oval = VERIFY_META,
- .help = "Use io information",
+ .oval = VERIFY_HDR_ONLY,
+ .help = "Use io information for verification. "
+ "Now is implied by default, thus option is obsolete, "
+ "don't use it",
+ },
+ { .ival = "pattern",
+ .oval = VERIFY_PATTERN_NO_HDR,
+ .help = "Verify strict pattern",
},
{
.ival = "null",
.lname = "Verify pattern",
.type = FIO_OPT_STR,
.cb = str_verify_pattern_cb,
+ .off1 = td_var_offset(verify_pattern),
.help = "Fill pattern for IO buffers",
.parent = "verify",
.hide = 1,
.lname = "Async verify CPUs",
.type = FIO_OPT_STR,
.cb = str_verify_cpus_allowed_cb,
+ .off1 = td_var_offset(verify_cpumask),
.help = "Set CPUs allowed for async verify threads",
.parent = "verify_async",
.hide = 1,
.off1 = td_var_offset(experimental_verify),
.type = FIO_OPT_BOOL,
.help = "Enable experimental verification",
+ .parent = "verify",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
+ },
+ {
+ .name = "verify_state_load",
+ .lname = "Load verify state",
+ .off1 = td_var_offset(verify_state),
+ .type = FIO_OPT_BOOL,
+ .help = "Load verify termination state",
+ .parent = "verify",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_VERIFY,
+ },
+ {
+ .name = "verify_state_save",
+ .lname = "Save verify state",
+ .off1 = td_var_offset(verify_state_save),
+ .type = FIO_OPT_BOOL,
+ .def = "1",
+ .help = "Save verify state on termination",
+ .parent = "verify",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_VERIFY,
},
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IOLOG,
},
+ {
+ .name = "replay_scale",
+ .lname = "Replay offset scale factor",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(replay_scale),
+ .parent = "read_iolog",
+ .def = "1",
+ .help = "Scale offset down by this factor",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
+ },
+ {
+ .name = "replay_align",
+ .lname = "Replay alignment",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(replay_align),
+ .parent = "read_iolog",
+ .help = "Align offsets to this blocksize",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
+ .pow2 = 1,
+ },
{
.name = "exec_prerun",
.lname = "Pre-execute runnable",
.lname = "Read/write mix read",
.type = FIO_OPT_INT,
.cb = str_rwmix_read_cb,
+ .off1 = td_var_offset(rwmix[DDIR_READ]),
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
.lname = "Read/write mix write",
.type = FIO_OPT_INT,
.cb = str_rwmix_write_cb,
+ .off1 = td_var_offset(rwmix[DDIR_WRITE]),
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",
.off1 = td_var_offset(thinktime),
.help = "Idle time between IO buffers (usec)",
.def = "0",
+ .is_time = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_THINKTIME,
},
.off1 = td_var_offset(thinktime_spin),
.help = "Start think time by spinning this amount (usec)",
.def = "0",
+ .is_time = 1,
.parent = "thinktime",
.hide = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RATE,
},
{
- .name = "ratemin",
+ .name = "rate_min",
+ .alias = "ratemin",
.lname = "I/O min rate",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratemin[DDIR_READ]),
.group = FIO_OPT_G_RATE,
},
{
- .name = "ratecycle",
+ .name = "rate_process",
+ .lname = "Rate Process",
+ .type = FIO_OPT_STR,
+ .off1 = td_var_offset(rate_process),
+ .help = "What process controls how rated IO is managed",
+ .def = "linear",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
+ .posval = {
+ { .ival = "linear",
+ .oval = RATE_PROCESS_LINEAR,
+ .help = "Linear rate of IO",
+ },
+ {
+ .ival = "poisson",
+ .oval = RATE_PROCESS_POISSON,
+ .help = "Rate follows Poisson process",
+ },
+ },
+ .parent = "rate",
+ },
+ {
+ .name = "rate_cycle",
+ .alias = "ratecycle",
.lname = "I/O rate cycle",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratecycle),
.type = FIO_OPT_INT,
.off1 = td_var_offset(max_latency),
.help = "Maximum tolerated IO latency (usec)",
+ .is_time = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_LATPROF,
},
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(latency_target),
.help = "Ramp to max queue depth supporting this latency",
+ .is_time = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_LATPROF,
},
.type = FIO_OPT_STR_VAL_TIME,
.off1 = td_var_offset(latency_window),
.help = "Time to sustain latency_target",
+ .is_time = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_LATPROF,
},
.category = FIO_OPT_C_FILE,
.def = "0",
},
+ {
+ .name = "allow_file_create",
+ .lname = "Allow file create",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(allow_create),
+ .help = "Permit fio to create files, if they don't exist",
+ .def = "1",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
+ },
+ {
+ .name = "allow_mounted_write",
+ .lname = "Allow mounted write",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(allow_mounted_write),
+ .help = "Allow writes to a mounted partition",
+ .def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
+ },
{
.name = "pre_read",
.lname = "Pre-read files",
.lname = "CPU mask",
.type = FIO_OPT_INT,
.cb = str_cpumask_cb,
+ .off1 = td_var_offset(cpumask),
.help = "CPU affinity mask",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
.lname = "CPUs allowed",
.type = FIO_OPT_STR,
.cb = str_cpus_allowed_cb,
+ .off1 = td_var_offset(cpumask),
.help = "Set CPUs allowed",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
},
+ {
+ .name = "cpus_allowed_policy",
+ .lname = "CPUs allowed distribution policy",
+ .type = FIO_OPT_STR,
+ .off1 = td_var_offset(cpus_allowed_policy),
+ .help = "Distribution policy for cpus_allowed",
+ .parent = "cpus_allowed",
+ .prio = 1,
+ .posval = {
+ { .ival = "shared",
+ .oval = FIO_CPUS_SHARED,
+ .help = "Mask shared between threads",
+ },
+ { .ival = "split",
+ .oval = FIO_CPUS_SPLIT,
+ .help = "Mask split between threads",
+ },
+ },
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
+ },
#endif
#ifdef CONFIG_LIBNUMA
{
.name = "numa_cpu_nodes",
.type = FIO_OPT_STR,
.cb = str_numa_cpunodes_cb,
+ .off1 = td_var_offset(numa_cpunodes),
.help = "NUMA CPU nodes bind",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
.name = "numa_mem_policy",
.type = FIO_OPT_STR,
.cb = str_numa_mpol_cb,
+ .off1 = td_var_offset(numa_memnodes),
.help = "NUMA memory policy setup",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_PROCESS,
},
+ {
+ .name = "exitall_on_error",
+ .lname = "Exit-all on terminate in error",
+ .type = FIO_OPT_BOOL,
+ /* was td_var_offset(unlink): that stored the flag into the
+  * unrelated 'unlink' field, so enabling exitall_on_error would
+  * also enable file unlinking. Point at the option's own field. */
+ .off1 = td_var_offset(exitall_error),
+ .help = "Terminate all jobs when one exits in error",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
+ },
{
.name = "stonewall",
.lname = "Wait for previous",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(use_thread),
.help = "Use threads instead of processes",
+#ifdef CONFIG_NO_SHM
+ .def = "1",
+ .no_warn_def = 1,
+#endif
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_PROCESS,
},
+ {
+ .name = "per_job_logs",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(per_job_logs),
+ .help = "Include job number in generated log files or not",
+ .def = "1",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "write_bw_log",
.lname = "Write bandwidth log",
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "log_offset",
+ .lname = "Log offset of IO",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(log_offset),
+ .help = "Include offset of IO for each log entry",
+ .def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+#ifdef CONFIG_ZLIB
+ {
+ .name = "log_compression",
+ .lname = "Log compression",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(log_gz),
+ .help = "Log in compressed chunks of this size",
+ .minval = 1024ULL,
+ .maxval = 512 * 1024 * 1024ULL,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+#ifdef FIO_HAVE_CPU_AFFINITY
+ {
+ .name = "log_compression_cpus",
+ .lname = "Log Compression CPUs",
+ .type = FIO_OPT_STR,
+ .cb = str_log_cpus_allowed_cb,
+ .off1 = td_var_offset(log_gz_cpumask),
+ .parent = "log_compression",
+ .help = "Limit log compression to these CPUs",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+#endif
+ {
+ .name = "log_store_compressed",
+ .lname = "Log store compressed",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(log_gz_store),
+ .help = "Store logs in a compressed format",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+#endif
+ {
+ .name = "block_error_percentiles",
+ .lname = "Block error percentiles",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(block_error_hist),
+ .help = "Record trim block errors and make a histogram",
+ .def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "bwavgtime",
.lname = "Bandwidth average time",
.lname = "Buffer pattern",
.type = FIO_OPT_STR,
.cb = str_buffer_pattern_cb,
+ .off1 = td_var_offset(buffer_pattern),
.help = "Fill pattern for IO buffers",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BUF,
.name = "buffer_compress_percentage",
.lname = "Buffer compression percentage",
.type = FIO_OPT_INT,
+ .cb = str_buffer_compress_cb,
.off1 = td_var_offset(compress_percentage),
.maxval = 100,
.minval = 0,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BUF,
},
+ {
+ .name = "dedupe_percentage",
+ .lname = "Dedupe percentage",
+ .type = FIO_OPT_INT,
+ .cb = str_dedupe_cb,
+ .off1 = td_var_offset(dedupe_percentage),
+ .maxval = 100,
+ .minval = 0,
+ .help = "Percentage of buffers that are dedupable",
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
+ },
{
.name = "clat_percentiles",
.lname = "Completion latency percentiles",
},
{
.name = "percentile_list",
- .lname = "Completion latency percentile list",
+ .lname = "Percentile list",
.type = FIO_OPT_FLOAT_LIST,
.off1 = td_var_offset(percentile_list),
.off2 = td_var_offset(percentile_precision),
- .help = "Specify a custom list of percentiles to report",
+ .help = "Specify a custom list of percentiles to report for "
+ "completion latency and block errors",
.def = "1:5:10:20:30:40:50:60:70:80:90:95:99:99.5:99.9:99.95:99.99",
.maxlen = FIO_IO_U_LIST_MAX_LEN,
.minfp = 0.0,
.name = "gtod_cpu",
.lname = "Dedicated gettimeofday() CPU",
.type = FIO_OPT_INT,
- .cb = str_gtod_cpu_cb,
+ .off1 = td_var_offset(gtod_cpu),
.help = "Set up dedicated gettimeofday() thread on this CPU",
.verify = gtod_cpu_verify,
.category = FIO_OPT_C_GENERAL,
.name = "ignore_error",
.type = FIO_OPT_STR,
.cb = str_ignore_error_cb,
+ .off1 = td_var_offset(ignore_error_nr),
.help = "Set a specific list of errors to ignore",
.parent = "rw",
.category = FIO_OPT_C_GENERAL,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_FLOW,
},
+ {
+ .name = "skip_bad",
+ .lname = "Skip operations against bad blocks",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(skip_bad),
+ .help = "Skip operations against known bad blocks.",
+ .hide = 1,
+ .def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_MTD,
+ },
{
.name = NULL,
},
},
};
+/*
+ * Release the replacement strings attached to the fio_keywords table
+ * (presumably allocated by fio_keywords_init -- the allocation site is
+ * not visible here). Safe to call when entries were never populated,
+ * since free(NULL) is a no-op, and pointers are cleared after freeing.
+ */
+void fio_keywords_exit(void)
+{
+ struct fio_keyword *kw;
+
+ kw = &fio_keywords[0];
+ while (kw->word) {
+ free(kw->replace);
+ kw->replace = NULL;
+ kw++;
+ }
+}
+
void fio_keywords_init(void)
{
unsigned long long mb_memory;
return NULL;
ret = fread(&buf[tmp - str], 1, 128 - (tmp - str), f);
- if (ret <= 0)
+ if (ret <= 0) {
+ pclose(f);
return NULL;
+ }
pclose(f);
buf[(tmp - str) + ret - 1] = '\0';
return opts_copy;
}
-int fio_options_parse(struct thread_data *td, char **opts, int num_opts,
- int dump_cmdline)
+/*
+ * For an unrecognized "name=value" option string, log a "Did you mean"
+ * suggestion naming the known option with the smallest edit distance,
+ * if string_distance_ok() deems the match close enough.
+ */
+static void show_closest_option(const char *opt)
+{
+ int best_option, best_distance;
+ int i, distance;
+ char *name;
+
+ if (!strlen(opt))
+ return;
+
+ /* copy and truncate at '=' so only the option name is compared */
+ name = strdup(opt);
+ i = 0;
+ while (name[i] != '\0' && name[i] != '=')
+ i++;
+ name[i] = '\0';
+
+ /* linear scan of the option table for the minimum edit distance */
+ best_option = -1;
+ best_distance = INT_MAX;
+ i = 0;
+ while (fio_options[i].name) {
+ distance = string_distance(name, fio_options[i].name);
+ if (distance < best_distance) {
+ best_distance = distance;
+ best_option = i;
+ }
+ i++;
+ }
+
+ if (best_option != -1 && string_distance_ok(name, best_distance))
+ log_err("Did you mean %s?\n", fio_options[best_option].name);
+
+ free(name);
+}
+
+int fio_options_parse(struct thread_data *td, char **opts, int num_opts)
{
int i, ret, unknown;
char **opts_copy;
for (ret = 0, i = 0, unknown = 0; i < num_opts; i++) {
struct fio_option *o;
int newret = parse_option(opts_copy[i], opts[i], fio_options,
- &o, td, dump_cmdline);
+ &o, td, &td->opt_list);
+
+ if (!newret && o)
+ fio_option_mark_set(&td->o, o);
if (opts_copy[i]) {
if (newret && !o) {
for (i = 0; i < num_opts; i++) {
struct fio_option *o = NULL;
int newret = 1;
+
if (!opts_copy[i])
continue;
if (td->eo)
newret = parse_option(opts_copy[i], opts[i],
td->io_ops->options, &o,
- td->eo, dump_cmdline);
+ td->eo, &td->opt_list);
ret |= newret;
- if (!o)
+ if (!o) {
log_err("Bad option <%s>\n", opts[i]);
-
+ show_closest_option(opts[i]);
+ }
free(opts_copy[i]);
opts_copy[i] = NULL;
}
int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)
{
- return parse_cmd_option(opt, val, fio_options, td);
+ int ret;
+
+ ret = parse_cmd_option(opt, val, fio_options, td, &td->opt_list);
+ if (!ret) {
+ struct fio_option *o;
+
+ o = find_option(fio_options, opt);
+ if (o)
+ fio_option_mark_set(&td->o, o);
+ }
+
+ return ret;
}
int fio_cmd_ioengine_option_parse(struct thread_data *td, const char *opt,
char *val)
{
- return parse_cmd_option(opt, val, td->io_ops->options, td->eo);
+ return parse_cmd_option(opt, val, td->io_ops->options, td->eo,
+ &td->opt_list);
}
void fio_fill_default_options(struct thread_data *td)
{
+ td->o.magic = OPT_MAGIC;
fill_default_options(td, fio_options);
}
struct thread_options *o = data;
unsigned int kb_base = 0;
- if (o)
+ /*
+ * This is a hack... For private options, *data is not holding
+ * a pointer to the thread_options, but to private data. This means
+ * we can't safely dereference it, but magic is first so mem wise
+ * it is valid. But this also means that if the job first sets
+ * kb_base and expects that to be honored by private options,
+ * it will be disappointed. We will return the global default
+ * for this.
+ */
+ if (o && o->magic == OPT_MAGIC)
kb_base = o->kb_base;
if (!kb_base)
kb_base = 1024;
return find_option(fio_options, name);
}
+/*
+ * Find the next entry in fio_options[] (strictly after 'from', or from
+ * the start when 'from' is NULL) whose off1 equals 'off1'. Several
+ * options can share a storage offset (aliases/callback pairs), hence
+ * the iterator form. Returns NULL when no further match exists.
+ * The 'o' parameter is unused here.
+ */
+static struct fio_option *find_next_opt(struct thread_options *o,
+ struct fio_option *from,
+ unsigned int off1)
+{
+ struct fio_option *opt;
+
+ if (!from)
+ from = &fio_options[0];
+ else
+ from++;
+
+ opt = NULL;
+ do {
+ if (off1 == from->off1) {
+ opt = from;
+ break;
+ }
+ from++;
+ } while (from->name);
+
+ return opt;
+}
+
+/*
+ * Test whether 'opt' was explicitly set by the user: each option's
+ * bit in o->set_options[] is indexed by its position in fio_options[].
+ * Returns non-zero if set.
+ */
+static int opt_is_set(struct thread_options *o, struct fio_option *opt)
+{
+ unsigned int opt_off, index, offset;
+
+ /* position of opt within the table -> (word, bit) in the bitmap */
+ opt_off = opt - &fio_options[0];
+ index = opt_off / (8 * sizeof(uint64_t));
+ offset = opt_off & ((8 * sizeof(uint64_t)) - 1);
+ return (o->set_options[index] & ((uint64_t)1 << offset)) != 0;
+}
+
+/*
+ * Return 1 if any option whose storage offset is 'off1' was explicitly
+ * set by the user, 0 otherwise. Iterates all table entries sharing
+ * that offset, since aliases map to the same field.
+ */
+int __fio_option_is_set(struct thread_options *o, unsigned int off1)
+{
+ struct fio_option *opt, *next;
+
+ next = NULL;
+ while ((opt = find_next_opt(o, next, off1)) != NULL) {
+ if (opt_is_set(o, opt))
+ return 1;
+
+ next = opt;
+ }
+
+ return 0;
+}
+
+/*
+ * Record that 'opt' was explicitly set by the user: set its bit
+ * (indexed by table position) in o->set_options[]. Counterpart of
+ * opt_is_set().
+ */
+void fio_option_mark_set(struct thread_options *o, struct fio_option *opt)
+{
+ unsigned int opt_off, index, offset;
+
+ opt_off = opt - &fio_options[0];
+ index = opt_off / (8 * sizeof(uint64_t));
+ offset = opt_off & ((8 * sizeof(uint64_t)) - 1);
+ o->set_options[index] |= (uint64_t)1 << offset;
+}