#include "lib/fls.h"
#include "lib/pattern.h"
#include "options.h"
-
-#include "crc/crc32c.h"
+#include "optgroup.h"
char client_sockaddr_str[INET6_ADDRSTRLEN] = { 0 };
-struct pattern_fmt_desc fmt_desc[] = {
- {
- .fmt = "%o",
- .len = FIELD_SIZE(struct io_u *, offset),
- .paste = paste_blockoff
- }
-};
-
/*
* Check if mmap/mmaphuge has a :/foo/bar/file at the end. If so, return that.
*/
const struct bssplit *bsp1 = p1;
const struct bssplit *bsp2 = p2;
- return bsp1->perc < bsp2->perc;
+ return (int) bsp1->perc - (int) bsp2->perc;
}
static int bssplit_ddir(struct thread_options *o, int ddir, char *str)
ret = bssplit_ddir(&td->o, DDIR_TRIM, op);
free(op);
}
- ret = bssplit_ddir(&td->o, DDIR_READ, str);
+ if (!ret)
+ ret = bssplit_ddir(&td->o, DDIR_READ, str);
}
free(p);
{
struct thread_data *td = data;
- if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP)
+ if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP ||
+ td->o.mem_type == MEM_MMAPSHARED)
td->o.mmapfile = get_opt_postfix(mem);
return 0;
{
struct thread_data *td = data;
+ if (parse_dryrun())
+ return 0;
+
return set_cpus_allowed(td, &td->o.verify_cpumask, input);
}
-#endif
+
+#ifdef CONFIG_ZLIB
+/*
+ * Option callback: parse the CPU set allowed for log compression and
+ * store it in td->o.log_gz_cpumask.  No-op during a parse dry-run.
+ */
+static int str_log_cpus_allowed_cb(void *data, const char *input)
+{
+	struct thread_data *td = data;
+
+	if (parse_dryrun())
+		return 0;
+
+	return set_cpus_allowed(td, &td->o.log_gz_cpumask, input);
+}
+#endif /* CONFIG_ZLIB */
+
+#endif /* FIO_HAVE_CPU_AFFINITY */
#ifdef CONFIG_LIBNUMA
static int str_numa_cpunodes_cb(void *data, char *input)
}
#endif
+/*
+ * qsort() comparator for zone_split entries: sorts by descending
+ * access percentage, so the most frequently accessed zones come first.
+ */
+static int zone_cmp(const void *p1, const void *p2)
+{
+	const struct zone_split *zsp1 = p1;
+	const struct zone_split *zsp2 = p2;
+
+	/* zsp2 - zsp1 yields descending order */
+	return (int) zsp2->access_perc - (int) zsp1->access_perc;
+}
+
+/*
+ * Parse the zoned-distribution spec for one data direction.  'str' is a
+ * colon-separated list of "access[/size]" entries, where both values are
+ * percentages.  Note that 'str' is consumed destructively via strsep().
+ * On success the resulting table (sorted by descending access percentage)
+ * is stored in o->zone_split[ddir] and 0 is returned; 1 on parse error.
+ */
+static int zone_split_ddir(struct thread_options *o, int ddir, char *str)
+{
+	struct zone_split *zsplit;
+	unsigned int i, perc, perc_missing, sperc, sperc_missing;
+	long long val;
+	char *fname;
+
+	/* Start with room for 4 entries; doubled on demand below */
+	o->zone_split_nr[ddir] = 4;
+	zsplit = malloc(4 * sizeof(struct zone_split));
+
+	i = 0;
+	while ((fname = strsep(&str, ":")) != NULL) {
+		char *perc_str;
+
+		if (!strlen(fname))
+			break;
+
+		/*
+		 * grow struct buffer, if needed
+		 */
+		if (i == o->zone_split_nr[ddir]) {
+			o->zone_split_nr[ddir] <<= 1;
+			zsplit = realloc(zsplit, o->zone_split_nr[ddir]
+						* sizeof(struct zone_split));
+		}
+
+		/*
+		 * Split "access/size".  A missing or zero size percentage
+		 * is marked with the -1U sentinel and filled in later.
+		 */
+		perc_str = strstr(fname, "/");
+		if (perc_str) {
+			*perc_str = '\0';
+			perc_str++;
+			perc = atoi(perc_str);
+			if (perc > 100)
+				perc = 100;
+			else if (!perc)
+				perc = -1U;
+		} else
+			perc = -1U;
+
+		/* 'fname' now holds just the access percentage */
+		if (str_to_decimal(fname, &val, 1, o, 0, 0)) {
+			log_err("fio: zone_split conversion failed\n");
+			free(zsplit);
+			return 1;
+		}
+
+		zsplit[i].access_perc = val;
+		zsplit[i].size_perc = perc;
+		i++;
+	}
+
+	o->zone_split_nr[ddir] = i;
+
+	/*
+	 * Now check if the percentages add up, and how much is missing.
+	 * NOTE: 'perc'/'sperc' are reused here as the access/size sums.
+	 */
+	perc = perc_missing = 0;
+	sperc = sperc_missing = 0;
+	for (i = 0; i < o->zone_split_nr[ddir]; i++) {
+		struct zone_split *zsp = &zsplit[i];
+
+		if (zsp->access_perc == (uint8_t) -1U)
+			perc_missing++;
+		else
+			perc += zsp->access_perc;
+
+		if (zsp->size_perc == (uint8_t) -1U)
+			sperc_missing++;
+		else
+			sperc += zsp->size_perc;
+
+	}
+
+	if (perc > 100 || sperc > 100) {
+		log_err("fio: zone_split percentages add to more than 100%%\n");
+		free(zsplit);
+		return 1;
+	}
+	if (perc < 100) {
+		log_err("fio: access percentage don't add up to 100 for zoned "
+			"random distribution (got=%u)\n", perc);
+		free(zsplit);
+		return 1;
+	}
+
+	/*
+	 * If values didn't have a percentage set, divide the remains between
+	 * them.
+	 */
+	if (perc_missing) {
+		if (perc_missing == 1 && o->zone_split_nr[ddir] == 1)
+			perc = 100;
+		for (i = 0; i < o->zone_split_nr[ddir]; i++) {
+			struct zone_split *zsp = &zsplit[i];
+
+			if (zsp->access_perc == (uint8_t) -1U)
+				zsp->access_perc = (100 - perc) / perc_missing;
+		}
+	}
+	if (sperc_missing) {
+		if (sperc_missing == 1 && o->zone_split_nr[ddir] == 1)
+			sperc = 100;
+		for (i = 0; i < o->zone_split_nr[ddir]; i++) {
+			struct zone_split *zsp = &zsplit[i];
+
+			if (zsp->size_perc == (uint8_t) -1U)
+				zsp->size_perc = (100 - sperc) / sperc_missing;
+		}
+	}
+
+	/*
+	 * now sort based on percentages, for ease of lookup
+	 */
+	qsort(zsplit, o->zone_split_nr[ddir], sizeof(struct zone_split), zone_cmp);
+	o->zone_split[ddir] = zsplit;
+	return 0;
+}
+
+/*
+ * Build the lookup table for one data direction: 100 slots, one per
+ * access-percentage point.  Each slot stores the cumulative size range
+ * [size_perc_prev, size_perc) of the zone it falls into, so the hot IO
+ * path can index it directly instead of walking the zone_split table.
+ */
+static void __td_zone_gen_index(struct thread_data *td, enum fio_ddir ddir)
+{
+	unsigned int i, j, sprev, aprev;
+
+	td->zone_state_index[ddir] = malloc(sizeof(struct zone_split_index) * 100);
+
+	sprev = aprev = 0;
+	for (i = 0; i < td->o.zone_split_nr[ddir]; i++) {
+		struct zone_split *zsp = &td->o.zone_split[ddir][i];
+
+		/* Fill this zone's access_perc worth of slots */
+		for (j = aprev; j < aprev + zsp->access_perc; j++) {
+			struct zone_split_index *zsi = &td->zone_state_index[ddir][j];
+
+			zsi->size_perc = sprev + zsp->size_perc;
+			zsi->size_perc_prev = sprev;
+		}
+
+		aprev += zsp->access_perc;
+		sprev += zsp->size_perc;
+	}
+}
+
+/*
+ * Generate state table for indexes, so we don't have to do it inline from
+ * the hot IO path
+ */
+static void td_zone_gen_index(struct thread_data *td)
+{
+	int i;
+
+	/* One zone_split_index table per data direction */
+	td->zone_state_index = malloc(DDIR_RWDIR_CNT *
+					sizeof(struct zone_split_index *));
+
+	for (i = 0; i < DDIR_RWDIR_CNT; i++)
+		__td_zone_gen_index(td, i);
+}
+
+/*
+ * Parse a "zoned:..." random distribution option value.  Up to three
+ * comma-separated zone specs may follow the prefix: read[,write[,trim]].
+ * Directions not given explicitly reuse the last spec that was given.
+ * Returns 0 on success, non-zero on parse failure.
+ */
+static int parse_zoned_distribution(struct thread_data *td, const char *input)
+{
+	char *str, *p, *odir, *ddir;
+	int i, ret = 0;
+
+	/* Work on a copy: zone_split_ddir() consumes its string via strsep() */
+	p = str = strdup(input);
+
+	strip_blank_front(&str);
+	strip_blank_end(str);
+
+	/* We expect it to start like that, bail if not */
+	if (strncmp(str, "zoned:", 6)) {
+		log_err("fio: mismatch in zoned input <%s>\n", str);
+		free(p);
+		return 1;
+	}
+	str += strlen("zoned:");
+
+	odir = strchr(str, ',');
+	if (odir) {
+		ddir = strchr(odir + 1, ',');
+		if (ddir) {
+			/* Three specs: read,write,trim */
+			ret = zone_split_ddir(&td->o, DDIR_TRIM, ddir + 1);
+			if (!ret)
+				*ddir = '\0';
+		} else {
+			char *op;
+
+			/* Two specs: trim reuses a copy of the write spec */
+			op = strdup(odir + 1);
+			ret = zone_split_ddir(&td->o, DDIR_TRIM, op);
+
+			free(op);
+		}
+		if (!ret)
+			ret = zone_split_ddir(&td->o, DDIR_WRITE, odir + 1);
+		if (!ret) {
+			*odir = '\0';
+			ret = zone_split_ddir(&td->o, DDIR_READ, str);
+		}
+	} else {
+		char *op;
+
+		/*
+		 * Single spec: all three directions share it.  Parsing is
+		 * destructive, so write and trim each get their own copy.
+		 */
+		op = strdup(str);
+		ret = zone_split_ddir(&td->o, DDIR_WRITE, op);
+		free(op);
+
+		if (!ret) {
+			op = strdup(str);
+			ret = zone_split_ddir(&td->o, DDIR_TRIM, op);
+			free(op);
+		}
+		if (!ret)
+			ret = zone_split_ddir(&td->o, DDIR_READ, str);
+	}
+
+	free(p);
+
+	/* Debug dump of the parsed tables */
+	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+		int j;
+
+		dprint(FD_PARSE, "zone ddir %d (nr=%u): \n", i, td->o.zone_split_nr[i]);
+
+		for (j = 0; j < td->o.zone_split_nr[i]; j++) {
+			struct zone_split *zsp = &td->o.zone_split[i][j];
+
+			dprint(FD_PARSE, "\t%d: %u/%u\n", j, zsp->access_perc,
+								zsp->size_perc);
+		}
+	}
+
+	if (!ret)
+		td_zone_gen_index(td);
+	else {
+		/* Invalidate any partially parsed tables on failure */
+		for (i = 0; i < DDIR_RWDIR_CNT; i++)
+			td->o.zone_split_nr[i] = 0;
+	}
+
+	return ret;
+}
+
static int str_random_distribution_cb(void *data, const char *str)
{
struct thread_data *td = data;
val = FIO_DEF_PARETO;
else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
val = 0.0;
+ else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
+ return parse_zoned_distribution(td, str);
else
return 0;
static int str_verify_pattern_cb(void *data, const char *input)
{
+ struct pattern_fmt_desc fmt_desc[] = {
+ {
+ .fmt = "%o",
+ .len = FIELD_SIZE(struct io_u *, offset),
+ .paste = paste_blockoff
+ }
+ };
struct thread_data *td = data;
int ret;
td->o.verify_fmt_sz = ARRAY_SIZE(td->o.verify_fmt);
ret = parse_and_fill_pattern(input, strlen(input), td->o.verify_pattern,
- MAX_PATTERN_SIZE, fmt_desc, sizeof(fmt_desc),
- td->o.verify_fmt, &td->o.verify_fmt_sz);
+ MAX_PATTERN_SIZE, fmt_desc, sizeof(fmt_desc),
+ td->o.verify_fmt, &td->o.verify_fmt_sz);
if (ret < 0)
return 1;
return 0;
}
-/*
- * Option grouping
- */
-static struct opt_group fio_opt_groups[] = {
- {
- .name = "General",
- .mask = FIO_OPT_C_GENERAL,
- },
- {
- .name = "I/O",
- .mask = FIO_OPT_C_IO,
- },
- {
- .name = "File",
- .mask = FIO_OPT_C_FILE,
- },
- {
- .name = "Statistics",
- .mask = FIO_OPT_C_STAT,
- },
- {
- .name = "Logging",
- .mask = FIO_OPT_C_LOG,
- },
- {
- .name = "Profiles",
- .mask = FIO_OPT_C_PROFILE,
- },
- {
- .name = NULL,
- },
-};
-
-static struct opt_group *__opt_group_from_mask(struct opt_group *ogs, unsigned int *mask,
- unsigned int inv_mask)
-{
- struct opt_group *og;
- int i;
-
- if (*mask == inv_mask || !*mask)
- return NULL;
-
- for (i = 0; ogs[i].name; i++) {
- og = &ogs[i];
-
- if (*mask & og->mask) {
- *mask &= ~(og->mask);
- return og;
- }
- }
-
- return NULL;
-}
-
-struct opt_group *opt_group_from_mask(unsigned int *mask)
-{
- return __opt_group_from_mask(fio_opt_groups, mask, FIO_OPT_C_INVALID);
-}
-
-static struct opt_group fio_opt_cat_groups[] = {
- {
- .name = "Latency profiling",
- .mask = FIO_OPT_G_LATPROF,
- },
- {
- .name = "Rate",
- .mask = FIO_OPT_G_RATE,
- },
- {
- .name = "Zone",
- .mask = FIO_OPT_G_ZONE,
- },
- {
- .name = "Read/write mix",
- .mask = FIO_OPT_G_RWMIX,
- },
- {
- .name = "Verify",
- .mask = FIO_OPT_G_VERIFY,
- },
- {
- .name = "Trim",
- .mask = FIO_OPT_G_TRIM,
- },
- {
- .name = "I/O Logging",
- .mask = FIO_OPT_G_IOLOG,
- },
- {
- .name = "I/O Depth",
- .mask = FIO_OPT_G_IO_DEPTH,
- },
- {
- .name = "I/O Flow",
- .mask = FIO_OPT_G_IO_FLOW,
- },
- {
- .name = "Description",
- .mask = FIO_OPT_G_DESC,
- },
- {
- .name = "Filename",
- .mask = FIO_OPT_G_FILENAME,
- },
- {
- .name = "General I/O",
- .mask = FIO_OPT_G_IO_BASIC,
- },
- {
- .name = "Cgroups",
- .mask = FIO_OPT_G_CGROUP,
- },
- {
- .name = "Runtime",
- .mask = FIO_OPT_G_RUNTIME,
- },
- {
- .name = "Process",
- .mask = FIO_OPT_G_PROCESS,
- },
- {
- .name = "Job credentials / priority",
- .mask = FIO_OPT_G_CRED,
- },
- {
- .name = "Clock settings",
- .mask = FIO_OPT_G_CLOCK,
- },
- {
- .name = "I/O Type",
- .mask = FIO_OPT_G_IO_TYPE,
- },
- {
- .name = "I/O Thinktime",
- .mask = FIO_OPT_G_THINKTIME,
- },
- {
- .name = "Randomizations",
- .mask = FIO_OPT_G_RANDOM,
- },
- {
- .name = "I/O buffers",
- .mask = FIO_OPT_G_IO_BUF,
- },
- {
- .name = "Tiobench profile",
- .mask = FIO_OPT_G_TIOBENCH,
- },
- {
- .name = "MTD",
- .mask = FIO_OPT_G_MTD,
- },
-
- {
- .name = NULL,
- }
-};
-
-struct opt_group *opt_group_cat_from_mask(unsigned int *mask)
-{
- return __opt_group_from_mask(fio_opt_cat_groups, mask, FIO_OPT_G_INVALID);
-}
-
/*
* Map of job/command line options
*/
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_DESC,
},
+ {
+ .name = "wait_for",
+ .lname = "Waitee name",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = td_var_offset(wait_for),
+ .help = "Name of the job this one wants to wait for before starting",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_DESC,
+ },
{
.name = "filename",
.lname = "Filename(s)",
.help = "Use preadv/pwritev",
},
#endif
+#ifdef CONFIG_PWRITEV
+ { .ival = "pvsync2",
+ .help = "Use preadv2/pwritev2",
+ },
+#endif
#ifdef CONFIG_LIBAIO
{ .ival = "libaio",
.help = "Linux native asynchronous IO",
.group = FIO_OPT_G_IO_BASIC,
},
{
- .name = "iodepth_batch_complete",
- .lname = "IO Depth batch complete",
+ .name = "iodepth_batch_complete_min",
+ .lname = "Min IO depth batch complete",
+ .alias = "iodepth_batch_complete",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth_batch_complete),
- .help = "Number of IO buffers to retrieve in one go",
+ .off1 = td_var_offset(iodepth_batch_complete_min),
+ .help = "Min number of IO buffers to retrieve in one go",
.parent = "iodepth",
.hide = 1,
.minval = 0,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
+ {
+ .name = "iodepth_batch_complete_max",
+ .lname = "Max IO depth batch complete",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(iodepth_batch_complete_max),
+ .help = "Max number of IO buffers to retrieve in one go",
+ .parent = "iodepth",
+ .hide = 1,
+ .minval = 0,
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
+ },
{
.name = "iodepth_low",
.lname = "IO Depth batch low",
.oval = FIO_RAND_DIST_GAUSS,
.help = "Normal (gaussian) distribution",
},
+ { .ival = "zoned",
+ .oval = FIO_RAND_DIST_ZONED,
+ .help = "Zoned random distribution",
+ },
+
},
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
.oval = MEM_MMAP,
.help = "Use mmap(2) (file or anon) for IO buffers",
},
+ { .ival = "mmapshared",
+ .oval = MEM_MMAPSHARED,
+ .help = "Like mmap, but use the shared flag",
+ },
#ifdef FIO_HAVE_HUGETLB
{ .ival = "mmaphuge",
.oval = MEM_MMAPHUGE,
.group = FIO_OPT_G_RATE,
},
{
- .name = "ratemin",
+ .name = "rate_min",
+ .alias = "ratemin",
.lname = "I/O min rate",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratemin[DDIR_READ]),
.group = FIO_OPT_G_RATE,
},
{
- .name = "ratecycle",
+ .name = "rate_process",
+ .lname = "Rate Process",
+ .type = FIO_OPT_STR,
+ .off1 = td_var_offset(rate_process),
+ .help = "What process controls how rated IO is managed",
+ .def = "linear",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
+ .posval = {
+ { .ival = "linear",
+ .oval = RATE_PROCESS_LINEAR,
+ .help = "Linear rate of IO",
+ },
+ {
+ .ival = "poisson",
+ .oval = RATE_PROCESS_POISSON,
+ .help = "Rate follows Poisson process",
+ },
+ },
+ .parent = "rate",
+ },
+ {
+ .name = "rate_cycle",
+ .alias = "ratecycle",
.lname = "I/O rate cycle",
.type = FIO_OPT_INT,
.off1 = td_var_offset(ratecycle),
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_PROCESS,
},
+ {
+ .name = "exitall_on_error",
+ .lname = "Exit-all on terminate in error",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(unlink),
+ .help = "Terminate all jobs when one exits in error",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
+ },
{
.name = "stonewall",
.lname = "Wait for previous",
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "log_max_value",
+ .lname = "Log maximum instead of average",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(log_max),
+ .help = "Log max sample in a window instead of average",
+ .def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "log_offset",
.lname = "Log offset of IO",
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
},
+#ifdef FIO_HAVE_CPU_AFFINITY
+ {
+ .name = "log_compression_cpus",
+ .lname = "Log Compression CPUs",
+ .type = FIO_OPT_STR,
+ .cb = str_log_cpus_allowed_cb,
+ .off1 = td_var_offset(log_gz_cpumask),
+ .parent = "log_compression",
+ .help = "Limit log compression to these CPUs",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+#endif
{
.name = "log_store_compressed",
.lname = "Log store compressed",
free(name);
}
-int fio_options_parse(struct thread_data *td, char **opts, int num_opts,
- int dump_cmdline)
+int fio_options_parse(struct thread_data *td, char **opts, int num_opts)
{
int i, ret, unknown;
char **opts_copy;
for (ret = 0, i = 0, unknown = 0; i < num_opts; i++) {
struct fio_option *o;
int newret = parse_option(opts_copy[i], opts[i], fio_options,
- &o, td, dump_cmdline);
+ &o, td, &td->opt_list);
if (!newret && o)
fio_option_mark_set(&td->o, o);
if (td->eo)
newret = parse_option(opts_copy[i], opts[i],
td->io_ops->options, &o,
- td->eo, dump_cmdline);
+ td->eo, &td->opt_list);
ret |= newret;
if (!o) {
{
int ret;
- ret = parse_cmd_option(opt, val, fio_options, td);
+ ret = parse_cmd_option(opt, val, fio_options, td, &td->opt_list);
if (!ret) {
struct fio_option *o;
int fio_cmd_ioengine_option_parse(struct thread_data *td, const char *opt,
char *val)
{
- return parse_cmd_option(opt, val, td->io_ops->options, td->eo);
+ return parse_cmd_option(opt, val, td->io_ops->options, td->eo,
+ &td->opt_list);
}
void fio_fill_default_options(struct thread_data *td)
free(td->eo);
td->eo = NULL;
}
+ if (td->zone_state_index) {
+ int i;
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++)
+ free(td->zone_state_index[i]);
+ free(td->zone_state_index);
+ td->zone_state_index = NULL;
+ }
}
struct fio_option *fio_option_find(const char *name)
return (o->set_options[index] & ((uint64_t)1 << offset)) != 0;
}
-int __fio_option_is_set(struct thread_options *o, unsigned int off1)
+bool __fio_option_is_set(struct thread_options *o, unsigned int off1)
{
struct fio_option *opt, *next;
next = NULL;
while ((opt = find_next_opt(o, next, off1)) != NULL) {
if (opt_is_set(o, opt))
- return 1;
+ return true;
next = opt;
}
- return 0;
+ return false;
}
void fio_option_mark_set(struct thread_options *o, struct fio_option *opt)