#include "lib/fls.h"
#include "lib/pattern.h"
#include "options.h"
-
-#include "crc/crc32c.h"
+#include "optgroup.h"
char client_sockaddr_str[INET6_ADDRSTRLEN] = { 0 };
+/*
+ * Option callbacks now receive a pointer to the thread_options embedded in
+ * thread_data (i.e. &td->o); recover the owning thread_data via
+ * container_of() on the 'o' member.
+ */
+#define cb_data_to_td(data) container_of(data, struct thread_data, o)
+
struct pattern_fmt_desc fmt_desc[] = {
{
.fmt = "%o",
const struct bssplit *bsp1 = p1;
const struct bssplit *bsp2 = p2;
- return bsp1->perc < bsp2->perc;
+ return (int) bsp1->perc - (int) bsp2->perc;
}
-static int bssplit_ddir(struct thread_options *o, int ddir, char *str)
+/*
+ * Scratch holder for parsing "v1/v2:v1/v2:..." option strings.
+ * Capacity is capped at 100 entries (split_parse_ddir stops at i == 100).
+ */
+struct split {
+	unsigned int nr;	/* number of entries actually parsed */
+	unsigned int val1[100];	/* value before the '/' (e.g. block size) */
+	unsigned int val2[100];	/* value after the '/' (e.g. percentage) */
+};
+
+static int split_parse_ddir(struct thread_options *o, struct split *split,
+ enum fio_ddir ddir, char *str)
{
- struct bssplit *bssplit;
- unsigned int i, perc, perc_missing;
- unsigned int max_bs, min_bs;
+ unsigned int i, perc;
long long val;
char *fname;
- o->bssplit_nr[ddir] = 4;
- bssplit = malloc(4 * sizeof(struct bssplit));
+ split->nr = 0;
i = 0;
- max_bs = 0;
- min_bs = -1;
while ((fname = strsep(&str, ":")) != NULL) {
char *perc_str;
if (!strlen(fname))
break;
- /*
- * grow struct buffer, if needed
- */
- if (i == o->bssplit_nr[ddir]) {
- o->bssplit_nr[ddir] <<= 1;
- bssplit = realloc(bssplit, o->bssplit_nr[ddir]
- * sizeof(struct bssplit));
- }
-
perc_str = strstr(fname, "/");
if (perc_str) {
*perc_str = '\0';
if (str_to_decimal(fname, &val, 1, o, 0, 0)) {
log_err("fio: bssplit conversion failed\n");
- free(bssplit);
return 1;
}
- if (val > max_bs)
- max_bs = val;
- if (val < min_bs)
- min_bs = val;
-
- bssplit[i].bs = val;
- bssplit[i].perc = perc;
+ split->val1[i] = val;
+ split->val2[i] = perc;
i++;
+ if (i == 100)
+ break;
}
- o->bssplit_nr[ddir] = i;
+ split->nr = i;
+ return 0;
+}
+
+static int bssplit_ddir(struct thread_options *o, enum fio_ddir ddir, char *str)
+{
+ unsigned int i, perc, perc_missing;
+ unsigned int max_bs, min_bs;
+ struct split split;
+
+ memset(&split, 0, sizeof(split));
+
+ if (split_parse_ddir(o, &split, ddir, str))
+ return 1;
+ if (!split.nr)
+ return 0;
+
+ max_bs = 0;
+ min_bs = -1;
+ o->bssplit[ddir] = malloc(split.nr * sizeof(struct bssplit));
+ o->bssplit_nr[ddir] = split.nr;
+	/* Copy parsed entries into o->bssplit, tracking min/max block size */
+	for (i = 0; i < split.nr; i++) {
+		if (split.val1[i] > max_bs)
+			max_bs = split.val1[i];
+		if (split.val1[i] < min_bs)
+			min_bs = split.val1[i];
+
+		o->bssplit[ddir][i].bs = split.val1[i];
+		o->bssplit[ddir][i].perc = split.val2[i];
+	}
/*
* Now check if the percentages add up, and how much is missing
*/
perc = perc_missing = 0;
for (i = 0; i < o->bssplit_nr[ddir]; i++) {
- struct bssplit *bsp = &bssplit[i];
+ struct bssplit *bsp = &o->bssplit[ddir][i];
if (bsp->perc == -1U)
perc_missing++;
if (perc > 100 && perc_missing > 1) {
log_err("fio: bssplit percentages add to more than 100%%\n");
- free(bssplit);
+ free(o->bssplit[ddir]);
+ o->bssplit[ddir] = NULL;
return 1;
}
if (perc_missing == 1 && o->bssplit_nr[ddir] == 1)
perc = 100;
for (i = 0; i < o->bssplit_nr[ddir]; i++) {
- struct bssplit *bsp = &bssplit[i];
+ struct bssplit *bsp = &o->bssplit[ddir][i];
if (bsp->perc == -1U)
bsp->perc = (100 - perc) / perc_missing;
/*
* now sort based on percentages, for ease of lookup
*/
- qsort(bssplit, o->bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
- o->bssplit[ddir] = bssplit;
+ qsort(o->bssplit[ddir], o->bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
return 0;
}
-static int str_bssplit_cb(void *data, const char *input)
+/* Per-data-direction parser callback used by str_split_parse()
+ * (bssplit_ddir, zone_split_ddir, ...) */
+typedef int (split_parse_fn)(struct thread_options *, enum fio_ddir, char *);
+
+static int str_split_parse(struct thread_data *td, char *str, split_parse_fn *fn)
{
- struct thread_data *td = data;
- char *str, *p, *odir, *ddir;
+ char *odir, *ddir;
int ret = 0;
- if (parse_dryrun())
- return 0;
-
- p = str = strdup(input);
-
- strip_blank_front(&str);
- strip_blank_end(str);
-
odir = strchr(str, ',');
if (odir) {
ddir = strchr(odir + 1, ',');
if (ddir) {
- ret = bssplit_ddir(&td->o, DDIR_TRIM, ddir + 1);
+ ret = fn(&td->o, DDIR_TRIM, ddir + 1);
if (!ret)
*ddir = '\0';
} else {
char *op;
op = strdup(odir + 1);
- ret = bssplit_ddir(&td->o, DDIR_TRIM, op);
+ ret = fn(&td->o, DDIR_TRIM, op);
free(op);
}
if (!ret)
- ret = bssplit_ddir(&td->o, DDIR_WRITE, odir + 1);
+ ret = fn(&td->o, DDIR_WRITE, odir + 1);
if (!ret) {
*odir = '\0';
- ret = bssplit_ddir(&td->o, DDIR_READ, str);
+ ret = fn(&td->o, DDIR_READ, str);
}
} else {
char *op;
op = strdup(str);
- ret = bssplit_ddir(&td->o, DDIR_WRITE, op);
+ ret = fn(&td->o, DDIR_WRITE, op);
free(op);
if (!ret) {
op = strdup(str);
- ret = bssplit_ddir(&td->o, DDIR_TRIM, op);
+ ret = fn(&td->o, DDIR_TRIM, op);
free(op);
}
- ret = bssplit_ddir(&td->o, DDIR_READ, str);
+ if (!ret)
+ ret = fn(&td->o, DDIR_READ, str);
+ }
+
+ return ret;
+}
+
+static int str_bssplit_cb(void *data, const char *input)
+{
+ struct thread_data *td = cb_data_to_td(data);
+ char *str, *p;
+ int ret = 0;
+
+ p = str = strdup(input);
+
+ strip_blank_front(&str);
+ strip_blank_end(str);
+
+ ret = str_split_parse(td, str, bssplit_ddir);
+
+ if (parse_dryrun()) {
+ int i;
+
+ for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+ free(td->o.bssplit[i]);
+ td->o.bssplit[i] = NULL;
+ td->o.bssplit_nr[i] = 0;
+ }
}
free(p);
"EINVAL", "ENFILE", "EMFILE", "ENOTTY",
"ETXTBSY","EFBIG", "ENOSPC", "ESPIPE",
"EROFS","EMLINK", "EPIPE", "EDOM", "ERANGE" };
- int i = 0, num = sizeof(err) / sizeof(void *);
+ int i = 0, num = sizeof(err) / sizeof(char *);
while (i < num) {
if (!strcmp(err[i], str))
static int str_ignore_error_cb(void *data, const char *input)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
char *str, *p, *n;
int type = 0, ret = 1;
static int str_rw_cb(void *data, const char *str)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
struct thread_options *o = &td->o;
char *nr;
static int str_mem_cb(void *data, const char *mem)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
- if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP)
+ if (td->o.mem_type == MEM_MMAPHUGE || td->o.mem_type == MEM_MMAP ||
+ td->o.mem_type == MEM_MMAPSHARED)
td->o.mmapfile = get_opt_postfix(mem);
return 0;
static int fio_clock_source_cb(void *data, const char *str)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
fio_clock_source = td->o.clocksource;
fio_clock_source_set = 1;
static int str_rwmix_read_cb(void *data, unsigned long long *val)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
td->o.rwmix[DDIR_READ] = *val;
td->o.rwmix[DDIR_WRITE] = 100 - *val;
static int str_rwmix_write_cb(void *data, unsigned long long *val)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
td->o.rwmix[DDIR_WRITE] = *val;
td->o.rwmix[DDIR_READ] = 100 - *val;
static int str_cpumask_cb(void *data, unsigned long long *val)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
unsigned int i;
long max_cpu;
int ret;
static int str_cpus_allowed_cb(void *data, const char *input)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
if (parse_dryrun())
return 0;
static int str_verify_cpus_allowed_cb(void *data, const char *input)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
+
+ if (parse_dryrun())
+ return 0;
return set_cpus_allowed(td, &td->o.verify_cpumask, input);
}
-#endif
+
+#ifdef CONFIG_ZLIB
+static int str_log_cpus_allowed_cb(void *data, const char *input)
+{
+ struct thread_data *td = cb_data_to_td(data);
+
+ if (parse_dryrun())
+ return 0;
+
+ return set_cpus_allowed(td, &td->o.log_gz_cpumask, input);
+}
+#endif /* CONFIG_ZLIB */
+
+#endif /* FIO_HAVE_CPU_AFFINITY */
#ifdef CONFIG_LIBNUMA
static int str_numa_cpunodes_cb(void *data, char *input)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
struct bitmask *verify_bitmask;
if (parse_dryrun())
static int str_numa_mpol_cb(void *data, char *input)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
const char * const policy_types[] =
{ "default", "prefer", "bind", "interleave", "local", NULL };
int i;
static int str_fst_cb(void *data, const char *str)
{
- struct thread_data *td = data;
- char *nr = get_opt_postfix(str);
+ struct thread_data *td = cb_data_to_td(data);
+ double val;
+ bool done = false;
+ char *nr;
td->file_service_nr = 1;
- if (nr) {
- td->file_service_nr = atoi(nr);
+
+ switch (td->o.file_service_type) {
+ case FIO_FSERVICE_RANDOM:
+ case FIO_FSERVICE_RR:
+ case FIO_FSERVICE_SEQ:
+ nr = get_opt_postfix(str);
+ if (nr) {
+ td->file_service_nr = atoi(nr);
+ free(nr);
+ }
+ done = true;
+ break;
+ case FIO_FSERVICE_ZIPF:
+ val = FIO_DEF_ZIPF;
+ break;
+ case FIO_FSERVICE_PARETO:
+ val = FIO_DEF_PARETO;
+ break;
+ case FIO_FSERVICE_GAUSS:
+ val = 0.0;
+ break;
+ default:
+ log_err("fio: bad file service type: %d\n", td->o.file_service_type);
+ return 1;
+ }
+
+ if (done)
+ return 0;
+
+ nr = get_opt_postfix(str);
+ if (nr && !str_to_float(nr, &val, 0)) {
+ log_err("fio: file service type random postfix parsing failed\n");
free(nr);
+ return 1;
+ }
+
+ free(nr);
+
+ switch (td->o.file_service_type) {
+ case FIO_FSERVICE_ZIPF:
+ if (val == 1.00) {
+ log_err("fio: zipf theta must be different than 1.0\n");
+ return 1;
+ }
+ if (parse_dryrun())
+ return 0;
+ td->zipf_theta = val;
+ break;
+ case FIO_FSERVICE_PARETO:
+ if (val <= 0.00 || val >= 1.00) {
+ log_err("fio: pareto input out of range (0 < input < 1.0)\n");
+ return 1;
+ }
+ if (parse_dryrun())
+ return 0;
+ td->pareto_h = val;
+ break;
+ case FIO_FSERVICE_GAUSS:
+ if (val < 0.00 || val >= 100.00) {
+ log_err("fio: normal deviation out of range (0 <= input < 100.0)\n");
+ return 1;
+ }
+ if (parse_dryrun())
+ return 0;
+ td->gauss_dev = val;
+ break;
}
return 0;
#ifdef CONFIG_SYNC_FILE_RANGE
static int str_sfr_cb(void *data, const char *str)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
char *nr = get_opt_postfix(str);
td->sync_file_range_nr = 1;
}
#endif
+/*
+ * qsort() comparator for zone_split entries: sorts by access percentage
+ * in DESCENDING order (note the operands are reversed compared to bs_cmp).
+ */
+static int zone_cmp(const void *p1, const void *p2)
+{
+	const struct zone_split *zsp1 = p1;
+	const struct zone_split *zsp2 = p2;
+
+	return (int) zsp2->access_perc - (int) zsp1->access_perc;
+}
+
+/*
+ * Parse a "access_perc/size_perc:..." zone split string for one data
+ * direction into o->zone_split[ddir]. Returns 0 on success, 1 on error
+ * (with o->zone_split[ddir] freed and NULLed on the error paths below).
+ */
+static int zone_split_ddir(struct thread_options *o, enum fio_ddir ddir,
+			   char *str)
+{
+	unsigned int i, perc, perc_missing, sperc, sperc_missing;
+	struct split split;
+
+	memset(&split, 0, sizeof(split));
+
+	if (split_parse_ddir(o, &split, ddir, str))
+		return 1;
+	if (!split.nr)
+		return 0;
+
+	o->zone_split[ddir] = malloc(split.nr * sizeof(struct zone_split));
+	o->zone_split_nr[ddir] = split.nr;
+	for (i = 0; i < split.nr; i++) {
+		o->zone_split[ddir][i].access_perc = split.val1[i];
+		o->zone_split[ddir][i].size_perc = split.val2[i];
+	}
+
+	/*
+	 * Now check if the percentages add up, and how much is missing
+	 */
+	perc = perc_missing = 0;
+	sperc = sperc_missing = 0;
+	for (i = 0; i < o->zone_split_nr[ddir]; i++) {
+		struct zone_split *zsp = &o->zone_split[ddir][i];
+
+		/* (uint8_t) -1U marks "percentage not given" */
+		if (zsp->access_perc == (uint8_t) -1U)
+			perc_missing++;
+		else
+			perc += zsp->access_perc;
+
+		if (zsp->size_perc == (uint8_t) -1U)
+			sperc_missing++;
+		else
+			sperc += zsp->size_perc;
+
+	}
+
+	if (perc > 100 || sperc > 100) {
+		log_err("fio: zone_split percentages add to more than 100%%\n");
+		free(o->zone_split[ddir]);
+		o->zone_split[ddir] = NULL;
+		return 1;
+	}
+	/*
+	 * NOTE(review): this check runs BEFORE the perc_missing fill below,
+	 * so any entry with a missing access percentage (which leaves
+	 * perc < 100) is rejected here unless the given percentages already
+	 * sum to exactly 100 — confirm this is the intended behavior.
+	 */
+	if (perc < 100) {
+		log_err("fio: access percentage don't add up to 100 for zoned "
+				"random distribution (got=%u)\n", perc);
+		free(o->zone_split[ddir]);
+		o->zone_split[ddir] = NULL;
+		return 1;
+	}
+
+	/*
+	 * If values didn't have a percentage set, divide the remains between
+	 * them.
+	 */
+	if (perc_missing) {
+		if (perc_missing == 1 && o->zone_split_nr[ddir] == 1)
+			perc = 100;
+		for (i = 0; i < o->zone_split_nr[ddir]; i++) {
+			struct zone_split *zsp = &o->zone_split[ddir][i];
+
+			if (zsp->access_perc == (uint8_t) -1U)
+				zsp->access_perc = (100 - perc) / perc_missing;
+		}
+	}
+	if (sperc_missing) {
+		if (sperc_missing == 1 && o->zone_split_nr[ddir] == 1)
+			sperc = 100;
+		for (i = 0; i < o->zone_split_nr[ddir]; i++) {
+			struct zone_split *zsp = &o->zone_split[ddir][i];
+
+			if (zsp->size_perc == (uint8_t) -1U)
+				zsp->size_perc = (100 - sperc) / sperc_missing;
+		}
+	}
+
+	/*
+	 * now sort based on percentages, for ease of lookup
+	 */
+	qsort(o->zone_split[ddir], o->zone_split_nr[ddir], sizeof(struct zone_split), zone_cmp);
+	return 0;
+}
+
+/*
+ * Build the 100-entry lookup table for one data direction: entry j covers
+ * access percentile j and records the cumulative size-percentage window
+ * [size_perc_prev, size_perc) for that zone. Access percentages are
+ * guaranteed to sum to 100 by zone_split_ddir(), so indices stay in range.
+ */
+static void __td_zone_gen_index(struct thread_data *td, enum fio_ddir ddir)
+{
+	unsigned int i, j, sprev, aprev;
+
+	td->zone_state_index[ddir] = malloc(sizeof(struct zone_split_index) * 100);
+
+	sprev = aprev = 0;
+	for (i = 0; i < td->o.zone_split_nr[ddir]; i++) {
+		struct zone_split *zsp = &td->o.zone_split[ddir][i];
+
+		/* fill every percentile slot this zone's access range covers */
+		for (j = aprev; j < aprev + zsp->access_perc; j++) {
+			struct zone_split_index *zsi = &td->zone_state_index[ddir][j];
+
+			zsi->size_perc = sprev + zsp->size_perc;
+			zsi->size_perc_prev = sprev;
+		}
+
+		aprev += zsp->access_perc;
+		sprev += zsp->size_perc;
+	}
+}
+
+/*
+ * Generate state table for indexes, so we don't have to do it inline from
+ * the hot IO path
+ */
+static void td_zone_gen_index(struct thread_data *td)
+{
+	int i;
+
+	/* one table pointer per data direction (read/write/trim) */
+	td->zone_state_index = malloc(DDIR_RWDIR_CNT *
+					sizeof(struct zone_split_index *));
+
+	for (i = 0; i < DDIR_RWDIR_CNT; i++)
+		__td_zone_gen_index(td, i);
+}
+
+/*
+ * Parse a "zoned:a/b:c/d,..." random_distribution value. On success the
+ * per-ddir zone tables are built (unless this is a dry run, in which case
+ * the parsed splits are freed again). Returns 0 on success, 1 on error.
+ */
+static int parse_zoned_distribution(struct thread_data *td, const char *input)
+{
+	char *str, *p;
+	int i, ret = 0;
+
+	p = str = strdup(input);
+
+	strip_blank_front(&str);
+	strip_blank_end(str);
+
+	/* We expect it to start like that, bail if not */
+	if (strncmp(str, "zoned:", 6)) {
+		log_err("fio: mismatch in zoned input <%s>\n", str);
+		free(p);
+		return 1;
+	}
+	str += strlen("zoned:");
+
+	ret = str_split_parse(td, str, zone_split_ddir);
+
+	free(p);
+
+	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+		int j;
+
+		dprint(FD_PARSE, "zone ddir %d (nr=%u): \n", i, td->o.zone_split_nr[i]);
+
+		for (j = 0; j < td->o.zone_split_nr[i]; j++) {
+			struct zone_split *zsp = &td->o.zone_split[i][j];
+
+			dprint(FD_PARSE, "\t%d: %u/%u\n", j, zsp->access_perc,
+								zsp->size_perc);
+		}
+	}
+
+	/* dry run: release everything we just parsed, don't build tables */
+	if (parse_dryrun()) {
+		for (i = 0; i < DDIR_RWDIR_CNT; i++) {
+			free(td->o.zone_split[i]);
+			td->o.zone_split[i] = NULL;
+			td->o.zone_split_nr[i] = 0;
+		}
+
+		return ret;
+	}
+
+	if (!ret)
+		td_zone_gen_index(td);
+	else {
+		for (i = 0; i < DDIR_RWDIR_CNT; i++)
+			td->o.zone_split_nr[i] = 0;
+	}
+
+	return ret;
+}
+
static int str_random_distribution_cb(void *data, const char *str)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
double val;
char *nr;
- if (parse_dryrun())
- return 0;
-
if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
val = FIO_DEF_ZIPF;
else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
val = FIO_DEF_PARETO;
else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
val = 0.0;
+ else if (td->o.random_distribution == FIO_RAND_DIST_ZONED)
+ return parse_zoned_distribution(td, str);
else
return 0;
log_err("fio: zipf theta must different than 1.0\n");
return 1;
}
+ if (parse_dryrun())
+ return 0;
td->o.zipf_theta.u.f = val;
} else if (td->o.random_distribution == FIO_RAND_DIST_PARETO) {
if (val <= 0.00 || val >= 1.00) {
log_err("fio: pareto input out of range (0 < input < 1.0)\n");
return 1;
}
+ if (parse_dryrun())
+ return 0;
td->o.pareto_h.u.f = val;
} else {
- if (val <= 0.00 || val >= 100.0) {
- log_err("fio: normal deviation out of range (0 < input < 100.0)\n");
+ if (val < 0.00 || val >= 100.0) {
+ log_err("fio: normal deviation out of range (0 <= input < 100.0)\n");
return 1;
}
+ if (parse_dryrun())
+ return 0;
td->o.gauss_dev.u.f = val;
}
return 0;
}
+/*
+ * Parse the steadystate threshold postfix: either a percentage ("10%"),
+ * an IOPS value, or a bandwidth value, depending on td->o.ss. Returns 0
+ * on success, 1 on parse error.
+ */
+static int str_steadystate_cb(void *data, const char *str)
+{
+	/*
+	 * Callback data points at thread_options (&td->o), like every other
+	 * option callback in this file — must go through cb_data_to_td().
+	 */
+	struct thread_data *td = cb_data_to_td(data);
+	double val;
+	char *nr;
+	char *pct;
+	long long ll;
+
+	if (td->o.ss != FIO_STEADYSTATE_IOPS &&
+	    td->o.ss != FIO_STEADYSTATE_IOPS_SLOPE &&
+	    td->o.ss != FIO_STEADYSTATE_BW &&
+	    td->o.ss != FIO_STEADYSTATE_BW_SLOPE) {
+		/* should be impossible to get here */
+		log_err("fio: unknown steady state criterion\n");
+		return 1;
+	}
+
+	nr = get_opt_postfix(str);
+	if (!nr) {
+		log_err("fio: steadystate threshold must be specified in addition to criterion\n");
+		return 1;
+	}
+
+	/* ENHANCEMENT Allow fio to understand size=10.2% and use here */
+	pct = strstr(nr, "%");
+	if (pct) {
+		*pct = '\0';
+		strip_blank_end(nr);
+		if (!str_to_float(nr, &val, 0)) {
+			log_err("fio: could not parse steadystate threshold percentage\n");
+			free(nr);
+			return 1;
+		}
+
+		dprint(FD_PARSE, "set steady state threshold to %f%%\n", val);
+		free(nr);
+		if (parse_dryrun())
+			return 0;
+
+		td->o.ss_pct = true;
+		td->o.ss_limit.u.f = val;
+
+	} else if (td->o.ss == FIO_STEADYSTATE_IOPS ||
+		   td->o.ss == FIO_STEADYSTATE_IOPS_SLOPE) {
+		if (!str_to_float(nr, &val, 0)) {
+			log_err("fio: steadystate IOPS threshold postfix parsing failed\n");
+			free(nr);
+			return 1;
+		}
+
+		dprint(FD_PARSE, "set steady state IOPS threshold to %f\n", val);
+		free(nr);
+		if (parse_dryrun())
+			return 0;
+
+		td->o.ss_pct = false;
+		td->o.ss_limit.u.f = val;
+
+	} else {	/* bandwidth criterion */
+		if (str_to_decimal(nr, &ll, 1, td, 0, 0)) {
+			log_err("fio: steadystate BW threshold postfix parsing failed\n");
+			free(nr);
+			return 1;
+		}
+
+		dprint(FD_PARSE, "set steady state BW threshold to %lld\n", ll);
+		free(nr);
+		if (parse_dryrun())
+			return 0;
+
+		td->o.ss_pct = false;
+		td->o.ss_limit.u.f = (double) ll;
+
+	}
+
+	return 0;
+}
+
/*
* Return next name in the string. Files are separated with ':'. If the ':'
* is escaped with a '\', then that ':' is part of the filename and does not
* Returns the directory at the index, indexes > entires will be
* assigned via modulo division of the index
*/
-int set_name_idx(char *target, size_t tlen, char *input, int index)
+int set_name_idx(char *target, size_t tlen, char *input, int index,
+ bool unique_filename)
{
unsigned int cur_idx;
int len;
for (cur_idx = 0; cur_idx <= index; cur_idx++)
fname = get_next_name(&str);
- if (client_sockaddr_str[0]) {
+ if (client_sockaddr_str[0] && unique_filename) {
len = snprintf(target, tlen, "%s/%s.", fname,
client_sockaddr_str);
} else
static int str_filename_cb(void *data, const char *input)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
char *fname, *str, *p;
p = str = strdup(input);
static int str_directory_cb(void *data, const char fio_unused *unused)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
struct stat sb;
char *dirname, *str, *p;
int ret = 0;
static int str_opendir_cb(void *data, const char fio_unused *str)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
if (parse_dryrun())
return 0;
static int str_buffer_pattern_cb(void *data, const char *input)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
int ret;
/* FIXME: for now buffer pattern does not support formats */
static int str_buffer_compress_cb(void *data, unsigned long long *il)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
td->flags |= TD_F_COMPRESS;
td->o.compress_percentage = *il;
static int str_dedupe_cb(void *data, unsigned long long *il)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
td->flags |= TD_F_COMPRESS;
td->o.dedupe_percentage = *il;
static int str_verify_pattern_cb(void *data, const char *input)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
int ret;
td->o.verify_fmt_sz = ARRAY_SIZE(td->o.verify_fmt);
static int str_gtod_reduce_cb(void *data, int *il)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
int val = *il;
td->o.disable_lat = !!val;
static int str_size_cb(void *data, unsigned long long *__val)
{
- struct thread_data *td = data;
+ struct thread_data *td = cb_data_to_td(data);
unsigned long long v = *__val;
if (parse_is_percent(v)) {
td->o.size = 0;
td->o.size_percent = -1ULL - v;
} else
- td->o.size = v;
-
- return 0;
-}
-
-static int rw_verify(struct fio_option *o, void *data)
-{
- struct thread_data *td = data;
-
- if (read_only && td_write(td)) {
- log_err("fio: job <%s> has write bit set, but fio is in"
- " read-only mode\n", td->o.name);
- return 1;
- }
-
- return 0;
-}
-
-static int gtod_cpu_verify(struct fio_option *o, void *data)
-{
-#ifndef FIO_HAVE_CPU_AFFINITY
- struct thread_data *td = data;
-
- if (td->o.gtod_cpu) {
- log_err("fio: platform must support CPU affinity for"
- "gettimeofday() offloading\n");
- return 1;
- }
-#endif
-
- return 0;
-}
-
-/*
- * Option grouping
- */
-static struct opt_group fio_opt_groups[] = {
- {
- .name = "General",
- .mask = FIO_OPT_C_GENERAL,
- },
- {
- .name = "I/O",
- .mask = FIO_OPT_C_IO,
- },
- {
- .name = "File",
- .mask = FIO_OPT_C_FILE,
- },
- {
- .name = "Statistics",
- .mask = FIO_OPT_C_STAT,
- },
- {
- .name = "Logging",
- .mask = FIO_OPT_C_LOG,
- },
- {
- .name = "Profiles",
- .mask = FIO_OPT_C_PROFILE,
- },
- {
- .name = NULL,
- },
-};
-
-static struct opt_group *__opt_group_from_mask(struct opt_group *ogs, unsigned int *mask,
- unsigned int inv_mask)
-{
- struct opt_group *og;
- int i;
+ td->o.size = v;
- if (*mask == inv_mask || !*mask)
- return NULL;
+ return 0;
+}
- for (i = 0; ogs[i].name; i++) {
- og = &ogs[i];
+static int rw_verify(struct fio_option *o, void *data)
+{
+ struct thread_data *td = cb_data_to_td(data);
- if (*mask & og->mask) {
- *mask &= ~(og->mask);
- return og;
- }
+ if (read_only && td_write(td)) {
+ log_err("fio: job <%s> has write bit set, but fio is in"
+ " read-only mode\n", td->o.name);
+ return 1;
}
- return NULL;
+ return 0;
}
-struct opt_group *opt_group_from_mask(unsigned int *mask)
+static int gtod_cpu_verify(struct fio_option *o, void *data)
{
- return __opt_group_from_mask(fio_opt_groups, mask, FIO_OPT_C_INVALID);
-}
-
-static struct opt_group fio_opt_cat_groups[] = {
- {
- .name = "Latency profiling",
- .mask = FIO_OPT_G_LATPROF,
- },
- {
- .name = "Rate",
- .mask = FIO_OPT_G_RATE,
- },
- {
- .name = "Zone",
- .mask = FIO_OPT_G_ZONE,
- },
- {
- .name = "Read/write mix",
- .mask = FIO_OPT_G_RWMIX,
- },
- {
- .name = "Verify",
- .mask = FIO_OPT_G_VERIFY,
- },
- {
- .name = "Trim",
- .mask = FIO_OPT_G_TRIM,
- },
- {
- .name = "I/O Logging",
- .mask = FIO_OPT_G_IOLOG,
- },
- {
- .name = "I/O Depth",
- .mask = FIO_OPT_G_IO_DEPTH,
- },
- {
- .name = "I/O Flow",
- .mask = FIO_OPT_G_IO_FLOW,
- },
- {
- .name = "Description",
- .mask = FIO_OPT_G_DESC,
- },
- {
- .name = "Filename",
- .mask = FIO_OPT_G_FILENAME,
- },
- {
- .name = "General I/O",
- .mask = FIO_OPT_G_IO_BASIC,
- },
- {
- .name = "Cgroups",
- .mask = FIO_OPT_G_CGROUP,
- },
- {
- .name = "Runtime",
- .mask = FIO_OPT_G_RUNTIME,
- },
- {
- .name = "Process",
- .mask = FIO_OPT_G_PROCESS,
- },
- {
- .name = "Job credentials / priority",
- .mask = FIO_OPT_G_CRED,
- },
- {
- .name = "Clock settings",
- .mask = FIO_OPT_G_CLOCK,
- },
- {
- .name = "I/O Type",
- .mask = FIO_OPT_G_IO_TYPE,
- },
- {
- .name = "I/O Thinktime",
- .mask = FIO_OPT_G_THINKTIME,
- },
- {
- .name = "Randomizations",
- .mask = FIO_OPT_G_RANDOM,
- },
- {
- .name = "I/O buffers",
- .mask = FIO_OPT_G_IO_BUF,
- },
- {
- .name = "Tiobench profile",
- .mask = FIO_OPT_G_TIOBENCH,
- },
- {
- .name = "MTD",
- .mask = FIO_OPT_G_MTD,
- },
+#ifndef FIO_HAVE_CPU_AFFINITY
+ struct thread_data *td = cb_data_to_td(data);
- {
- .name = NULL,
+ if (td->o.gtod_cpu) {
+ log_err("fio: platform must support CPU affinity for"
+ "gettimeofday() offloading\n");
+ return 1;
}
-};
+#endif
-struct opt_group *opt_group_cat_from_mask(unsigned int *mask)
-{
- return __opt_group_from_mask(fio_opt_cat_groups, mask, FIO_OPT_G_INVALID);
+ return 0;
}
/*
.name = "description",
.lname = "Description of job",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(description),
+ .off1 = offsetof(struct thread_options, description),
.help = "Text job description",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_DESC,
.name = "name",
.lname = "Job name",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(name),
+ .off1 = offsetof(struct thread_options, name),
.help = "Name of this job",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_DESC,
},
+ {
+ .name = "wait_for",
+ .lname = "Waitee name",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = offsetof(struct thread_options, wait_for),
+ .help = "Name of the job this one wants to wait for before starting",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_DESC,
+ },
{
.name = "filename",
.lname = "Filename(s)",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(filename),
+ .off1 = offsetof(struct thread_options, filename),
.cb = str_filename_cb,
.prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
.name = "directory",
.lname = "Directory",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(directory),
+ .off1 = offsetof(struct thread_options, directory),
.cb = str_directory_cb,
.help = "Directory to store files in",
.category = FIO_OPT_C_FILE,
},
{
.name = "filename_format",
+ .lname = "Filename Format",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(filename_format),
+ .off1 = offsetof(struct thread_options, filename_format),
.prio = -1, /* must come after "directory" */
.help = "Override default $jobname.$jobnum.$filenum naming",
.def = "$jobname.$jobnum.$filenum",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_FILENAME,
},
+ {
+ .name = "unique_filename",
+ .lname = "Unique Filename",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, unique_filename),
+ .help = "For network clients, prefix file with source IP",
+ .def = "1",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_FILENAME,
+ },
{
.name = "lockfile",
.lname = "Lockfile",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(file_lock_mode),
+ .off1 = offsetof(struct thread_options, file_lock_mode),
.help = "Lock file when doing IO to it",
.prio = 1,
.parent = "filename",
.name = "opendir",
.lname = "Open directory",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(opendir),
+ .off1 = offsetof(struct thread_options, opendir),
.cb = str_opendir_cb,
.help = "Recursively add files from this directory and down",
.category = FIO_OPT_C_FILE,
.alias = "readwrite",
.type = FIO_OPT_STR,
.cb = str_rw_cb,
- .off1 = td_var_offset(td_ddir),
+ .off1 = offsetof(struct thread_options, td_ddir),
.help = "IO direction",
.def = "read",
.verify = rw_verify,
.name = "rw_sequencer",
.lname = "RW Sequencer",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(rw_seq),
+ .off1 = offsetof(struct thread_options, rw_seq),
.help = "IO offset generator modifier",
.def = "sequential",
.category = FIO_OPT_C_IO,
.name = "ioengine",
.lname = "IO Engine",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(ioengine),
+ .off1 = offsetof(struct thread_options, ioengine),
.help = "IO engine to use",
.def = FIO_PREFERRED_ENGINE,
.category = FIO_OPT_C_IO,
.help = "Use preadv/pwritev",
},
#endif
+#ifdef FIO_HAVE_PWRITEV2
+ { .ival = "pvsync2",
+ .help = "Use preadv2/pwritev2",
+ },
+#endif
#ifdef CONFIG_LIBAIO
{ .ival = "libaio",
.help = "Linux native asynchronous IO",
{ .ival = "libhdfs",
.help = "Hadoop Distributed Filesystem (HDFS) engine"
},
+#endif
+#ifdef CONFIG_PMEMBLK
+ { .ival = "pmemblk",
+ .help = "NVML libpmemblk based IO engine",
+ },
+
#endif
{ .ival = "external",
.help = "Load external engine (append name)",
.name = "iodepth",
.lname = "IO Depth",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth),
+ .off1 = offsetof(struct thread_options, iodepth),
.help = "Number of IO buffers to keep in flight",
.minval = 1,
.interval = 1,
.lname = "IO Depth batch",
.alias = "iodepth_batch_submit",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth_batch),
+ .off1 = offsetof(struct thread_options, iodepth_batch),
.help = "Number of IO buffers to submit in one go",
.parent = "iodepth",
.hide = 1,
- .minval = 1,
.interval = 1,
.def = "1",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
{
- .name = "iodepth_batch_complete",
- .lname = "IO Depth batch complete",
+ .name = "iodepth_batch_complete_min",
+ .lname = "Min IO depth batch complete",
+ .alias = "iodepth_batch_complete",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth_batch_complete),
- .help = "Number of IO buffers to retrieve in one go",
+ .off1 = offsetof(struct thread_options, iodepth_batch_complete_min),
+ .help = "Min number of IO buffers to retrieve in one go",
.parent = "iodepth",
.hide = 1,
.minval = 0,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BASIC,
},
+ {
+ .name = "iodepth_batch_complete_max",
+ .lname = "Max IO depth batch complete",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct thread_options, iodepth_batch_complete_max),
+ .help = "Max number of IO buffers to retrieve in one go",
+ .parent = "iodepth",
+ .hide = 1,
+ .minval = 0,
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BASIC,
+ },
{
.name = "iodepth_low",
.lname = "IO Depth batch low",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(iodepth_low),
+ .off1 = offsetof(struct thread_options, iodepth_low),
.help = "Low water mark for queuing depth",
.parent = "iodepth",
.hide = 1,
.name = "io_submit_mode",
.lname = "IO submit mode",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(io_submit_mode),
+ .off1 = offsetof(struct thread_options, io_submit_mode),
.help = "How IO submissions and completions are done",
.def = "inline",
.category = FIO_OPT_C_IO,
.lname = "Size",
.type = FIO_OPT_STR_VAL,
.cb = str_size_cb,
- .off1 = td_var_offset(size),
+ .off1 = offsetof(struct thread_options, size),
.help = "Total size of device or files",
.interval = 1024 * 1024,
.category = FIO_OPT_C_IO,
.alias = "io_limit",
.lname = "IO Size",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(io_limit),
+ .off1 = offsetof(struct thread_options, io_limit),
.interval = 1024 * 1024,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
.lname = "Fill device",
.alias = "fill_fs",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(fill_device),
+ .off1 = offsetof(struct thread_options, fill_device),
.help = "Write until an ENOSPC error occurs",
.def = "0",
.category = FIO_OPT_C_FILE,
.name = "filesize",
.lname = "File size",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(file_size_low),
- .off2 = td_var_offset(file_size_high),
+ .off1 = offsetof(struct thread_options, file_size_low),
+ .off2 = offsetof(struct thread_options, file_size_high),
.minval = 1,
.help = "Size of individual files",
.interval = 1024 * 1024,
.name = "file_append",
.lname = "File append",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(file_append),
+ .off1 = offsetof(struct thread_options, file_append),
.help = "IO will start at the end of the file(s)",
.def = "0",
.category = FIO_OPT_C_FILE,
.lname = "IO offset",
.alias = "fileoffset",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(start_offset),
+ .off1 = offsetof(struct thread_options, start_offset),
.help = "Start IO from this offset",
.def = "0",
.interval = 1024 * 1024,
.name = "offset_increment",
.lname = "IO offset increment",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(offset_increment),
+ .off1 = offsetof(struct thread_options, offset_increment),
.help = "What is the increment from one offset to the next",
.parent = "offset",
.hide = 1,
.name = "number_ios",
.lname = "Number of IOs to perform",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(number_ios),
+ .off1 = offsetof(struct thread_options, number_ios),
.help = "Force job completion after this number of IOs",
.def = "0",
.category = FIO_OPT_C_IO,
.lname = "Block size",
.alias = "blocksize",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(bs[DDIR_READ]),
- .off2 = td_var_offset(bs[DDIR_WRITE]),
- .off3 = td_var_offset(bs[DDIR_TRIM]),
+ .off1 = offsetof(struct thread_options, bs[DDIR_READ]),
+ .off2 = offsetof(struct thread_options, bs[DDIR_WRITE]),
+ .off3 = offsetof(struct thread_options, bs[DDIR_TRIM]),
.minval = 1,
.help = "Block size unit",
.def = "4k",
.lname = "Block size align",
.alias = "blockalign",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(ba[DDIR_READ]),
- .off2 = td_var_offset(ba[DDIR_WRITE]),
- .off3 = td_var_offset(ba[DDIR_TRIM]),
+ .off1 = offsetof(struct thread_options, ba[DDIR_READ]),
+ .off2 = offsetof(struct thread_options, ba[DDIR_WRITE]),
+ .off3 = offsetof(struct thread_options, ba[DDIR_TRIM]),
.minval = 1,
.help = "IO block offset alignment",
.parent = "rw",
.lname = "Block size range",
.alias = "blocksize_range",
.type = FIO_OPT_RANGE,
- .off1 = td_var_offset(min_bs[DDIR_READ]),
- .off2 = td_var_offset(max_bs[DDIR_READ]),
- .off3 = td_var_offset(min_bs[DDIR_WRITE]),
- .off4 = td_var_offset(max_bs[DDIR_WRITE]),
- .off5 = td_var_offset(min_bs[DDIR_TRIM]),
- .off6 = td_var_offset(max_bs[DDIR_TRIM]),
+ .off1 = offsetof(struct thread_options, min_bs[DDIR_READ]),
+ .off2 = offsetof(struct thread_options, max_bs[DDIR_READ]),
+ .off3 = offsetof(struct thread_options, min_bs[DDIR_WRITE]),
+ .off4 = offsetof(struct thread_options, max_bs[DDIR_WRITE]),
+ .off5 = offsetof(struct thread_options, min_bs[DDIR_TRIM]),
+ .off6 = offsetof(struct thread_options, max_bs[DDIR_TRIM]),
.minval = 1,
.help = "Set block size range (in more detail than bs)",
.parent = "rw",
.lname = "Block size split",
.type = FIO_OPT_STR,
.cb = str_bssplit_cb,
- .off1 = td_var_offset(bssplit),
+ .off1 = offsetof(struct thread_options, bssplit),
.help = "Set a specific mix of block sizes",
.parent = "rw",
.hide = 1,
.lname = "Block size unaligned",
.alias = "blocksize_unaligned",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(bs_unaligned),
+ .off1 = offsetof(struct thread_options, bs_unaligned),
.help = "Don't sector align IO buffer sizes",
.parent = "rw",
.hide = 1,
.name = "bs_is_seq_rand",
.lname = "Block size division is seq/random (not read/write)",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(bs_is_seq_rand),
+ .off1 = offsetof(struct thread_options, bs_is_seq_rand),
.help = "Consider any blocksize setting to be sequential,random",
.def = "0",
.parent = "blocksize",
.name = "randrepeat",
.lname = "Random repeatable",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(rand_repeatable),
+ .off1 = offsetof(struct thread_options, rand_repeatable),
.help = "Use repeatable random IO pattern",
.def = "1",
.parent = "rw",
.name = "randseed",
.lname = "The random generator seed",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(rand_seed),
+ .off1 = offsetof(struct thread_options, rand_seed),
.help = "Set the random generator seed value",
.def = "0x89",
.parent = "rw",
.name = "use_os_rand",
.lname = "Use OS random",
.type = FIO_OPT_DEPRECATED,
- .off1 = td_var_offset(dep_use_os_rand),
+ .off1 = offsetof(struct thread_options, dep_use_os_rand),
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
.name = "norandommap",
.lname = "No randommap",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(norandommap),
+ .off1 = offsetof(struct thread_options, norandommap),
.help = "Accept potential duplicate random blocks",
.parent = "rw",
.hide = 1,
.name = "softrandommap",
.lname = "Soft randommap",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(softrandommap),
+ .off1 = offsetof(struct thread_options, softrandommap),
.help = "Set norandommap if randommap allocation fails",
.parent = "norandommap",
.hide = 1,
},
{
.name = "random_generator",
+ .lname = "Random Generator",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(random_generator),
+ .off1 = offsetof(struct thread_options, random_generator),
.help = "Type of random number generator to use",
.def = "tausworthe",
.posval = {
},
{
.name = "random_distribution",
+ .lname = "Random Distribution",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(random_distribution),
+ .off1 = offsetof(struct thread_options, random_distribution),
.cb = str_random_distribution_cb,
.help = "Random offset distribution generator",
.def = "random",
},
{ .ival = "normal",
.oval = FIO_RAND_DIST_GAUSS,
- .help = "Normal (gaussian) distribution",
+ .help = "Normal (Gaussian) distribution",
+ },
+ { .ival = "zoned",
+ .oval = FIO_RAND_DIST_ZONED,
+ .help = "Zoned random distribution",
},
+
},
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
.name = "percentage_random",
.lname = "Percentage Random",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(perc_rand[DDIR_READ]),
- .off2 = td_var_offset(perc_rand[DDIR_WRITE]),
- .off3 = td_var_offset(perc_rand[DDIR_TRIM]),
+ .off1 = offsetof(struct thread_options, perc_rand[DDIR_READ]),
+ .off2 = offsetof(struct thread_options, perc_rand[DDIR_WRITE]),
+ .off3 = offsetof(struct thread_options, perc_rand[DDIR_TRIM]),
.maxval = 100,
.help = "Percentage of seq/random mix that should be random",
.def = "100,100,100",
},
{
.name = "allrandrepeat",
+ .lname = "All Random Repeat",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(allrand_repeatable),
+ .off1 = offsetof(struct thread_options, allrand_repeatable),
.help = "Use repeatable random numbers for everything",
.def = "0",
.category = FIO_OPT_C_IO,
.lname = "Number of files",
.alias = "nr_files",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(nr_files),
+ .off1 = offsetof(struct thread_options, nr_files),
.help = "Split job workload between this number of files",
.def = "1",
.interval = 1,
.name = "openfiles",
.lname = "Number of open files",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(open_files),
+ .off1 = offsetof(struct thread_options, open_files),
.help = "Number of files to keep open at the same time",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
.lname = "File service type",
.type = FIO_OPT_STR,
.cb = str_fst_cb,
- .off1 = td_var_offset(file_service_type),
+ .off1 = offsetof(struct thread_options, file_service_type),
.help = "How to select which file to service next",
.def = "roundrobin",
.category = FIO_OPT_C_FILE,
.posval = {
{ .ival = "random",
.oval = FIO_FSERVICE_RANDOM,
- .help = "Choose a file at random",
+ .help = "Choose a file at random (uniform)",
+ },
+ { .ival = "zipf",
+ .oval = FIO_FSERVICE_ZIPF,
+ .help = "Zipf randomized",
+ },
+ { .ival = "pareto",
+ .oval = FIO_FSERVICE_PARETO,
+ .help = "Pareto randomized",
+ },
+ { .ival = "gauss",
+ .oval = FIO_FSERVICE_GAUSS,
+ .help = "Normal (Gaussian) distribution",
},
{ .ival = "roundrobin",
.oval = FIO_FSERVICE_RR,
.name = "fallocate",
.lname = "Fallocate",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(fallocate_mode),
+ .off1 = offsetof(struct thread_options, fallocate_mode),
.help = "Whether pre-allocation is performed when laying out files",
.def = "posix",
.category = FIO_OPT_C_FILE,
},
},
},
-#endif /* CONFIG_POSIX_FALLOCATE */
+#else /* CONFIG_POSIX_FALLOCATE */
+ {
+ .name = "fallocate",
+ .lname = "Fallocate",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support fallocate",
+ },
+#endif /* CONFIG_POSIX_FALLOCATE */
{
.name = "fadvise_hint",
.lname = "Fadvise hint",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(fadvise_hint),
+ .off1 = offsetof(struct thread_options, fadvise_hint),
.help = "Use fadvise() to advise the kernel on IO pattern",
.def = "1",
.category = FIO_OPT_C_FILE,
.name = "fadvise_stream",
.lname = "Fadvise stream",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(fadvise_stream),
+ .off1 = offsetof(struct thread_options, fadvise_stream),
.help = "Use fadvise() to set stream ID",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
+#else
+ {
+ .name = "fadvise_stream",
+ .lname = "Fadvise stream",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support fadvise stream ID",
+ },
#endif
{
.name = "fsync",
.lname = "Fsync",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(fsync_blocks),
+ .off1 = offsetof(struct thread_options, fsync_blocks),
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
.interval = 1,
.name = "fdatasync",
.lname = "Fdatasync",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(fdatasync_blocks),
+ .off1 = offsetof(struct thread_options, fdatasync_blocks),
.help = "Issue fdatasync for writes every given number of blocks",
.def = "0",
.interval = 1,
.name = "write_barrier",
.lname = "Write barrier",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(barrier_blocks),
+ .off1 = offsetof(struct thread_options, barrier_blocks),
.help = "Make every Nth write a barrier write",
.def = "0",
.interval = 1,
},
.type = FIO_OPT_STR_MULTI,
.cb = str_sfr_cb,
- .off1 = td_var_offset(sync_file_range),
+ .off1 = offsetof(struct thread_options, sync_file_range),
.help = "Use sync_file_range()",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
+#else
+ {
+ .name = "sync_file_range",
+ .lname = "Sync file range",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support sync_file_range",
+ },
#endif
{
.name = "direct",
.lname = "Direct I/O",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(odirect),
+ .off1 = offsetof(struct thread_options, odirect),
.help = "Use O_DIRECT IO (negates buffered)",
.def = "0",
.inverse = "buffered",
.name = "atomic",
.lname = "Atomic I/O",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(oatomic),
+ .off1 = offsetof(struct thread_options, oatomic),
.help = "Use Atomic IO with O_DIRECT (implies O_DIRECT)",
.def = "0",
.category = FIO_OPT_C_IO,
.name = "buffered",
.lname = "Buffered I/O",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(odirect),
+ .off1 = offsetof(struct thread_options, odirect),
.neg = 1,
.help = "Use buffered IO (negates direct)",
.def = "1",
.name = "overwrite",
.lname = "Overwrite",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(overwrite),
+ .off1 = offsetof(struct thread_options, overwrite),
.help = "When writing, set whether to overwrite current data",
.def = "0",
.category = FIO_OPT_C_FILE,
.name = "loops",
.lname = "Loops",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(loops),
+ .off1 = offsetof(struct thread_options, loops),
.help = "Number of times to run the job",
.def = "1",
.interval = 1,
.name = "numjobs",
.lname = "Number of jobs",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(numjobs),
+ .off1 = offsetof(struct thread_options, numjobs),
.help = "Duplicate this job this many times",
.def = "1",
.interval = 1,
.name = "startdelay",
.lname = "Start delay",
.type = FIO_OPT_STR_VAL_TIME,
- .off1 = td_var_offset(start_delay),
- .off2 = td_var_offset(start_delay_high),
+ .off1 = offsetof(struct thread_options, start_delay),
+ .off2 = offsetof(struct thread_options, start_delay_high),
.help = "Only start job when this period has passed",
.def = "0",
.is_seconds = 1,
.lname = "Runtime",
.alias = "timeout",
.type = FIO_OPT_STR_VAL_TIME,
- .off1 = td_var_offset(timeout),
+ .off1 = offsetof(struct thread_options, timeout),
.help = "Stop workload when this amount of time has passed",
.def = "0",
.is_seconds = 1,
.name = "time_based",
.lname = "Time based",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(time_based),
+ .off1 = offsetof(struct thread_options, time_based),
.help = "Keep running until runtime/timeout is met",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
.name = "verify_only",
.lname = "Verify only",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(verify_only),
+ .off1 = offsetof(struct thread_options, verify_only),
.help = "Verifies previously written data is still valid",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_RUNTIME,
.name = "ramp_time",
.lname = "Ramp time",
.type = FIO_OPT_STR_VAL_TIME,
- .off1 = td_var_offset(ramp_time),
+ .off1 = offsetof(struct thread_options, ramp_time),
.help = "Ramp up time before measuring performance",
.is_seconds = 1,
.is_time = 1,
.lname = "Clock source",
.type = FIO_OPT_STR,
.cb = fio_clock_source_cb,
- .off1 = td_var_offset(clocksource),
+ .off1 = offsetof(struct thread_options, clocksource),
.help = "What type of timing source to use",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CLOCK,
.lname = "I/O Memory",
.type = FIO_OPT_STR,
.cb = str_mem_cb,
- .off1 = td_var_offset(mem_type),
+ .off1 = offsetof(struct thread_options, mem_type),
.help = "Backing type for IO buffers",
.def = "malloc",
.category = FIO_OPT_C_IO,
.oval = MEM_MMAP,
.help = "Use mmap(2) (file or anon) for IO buffers",
},
+ { .ival = "mmapshared",
+ .oval = MEM_MMAPSHARED,
+ .help = "Like mmap, but use the shared flag",
+ },
#ifdef FIO_HAVE_HUGETLB
{ .ival = "mmaphuge",
.oval = MEM_MMAPHUGE,
.alias = "mem_align",
.lname = "I/O memory alignment",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(mem_align),
+ .off1 = offsetof(struct thread_options, mem_align),
.minval = 0,
.help = "IO memory buffer offset alignment",
.def = "0",
.name = "verify",
.lname = "Verify",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(verify),
+ .off1 = offsetof(struct thread_options, verify),
.help = "Verify data written",
.def = "0",
.category = FIO_OPT_C_IO,
.name = "do_verify",
.lname = "Perform verify step",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(do_verify),
+ .off1 = offsetof(struct thread_options, do_verify),
.help = "Run verification stage after write",
.def = "1",
.parent = "verify",
.name = "verifysort",
.lname = "Verify sort",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(verifysort),
+ .off1 = offsetof(struct thread_options, verifysort),
.help = "Sort written verify blocks for read back",
.def = "1",
.parent = "verify",
},
{
.name = "verifysort_nr",
+ .lname = "Verify Sort Nr",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(verifysort_nr),
+ .off1 = offsetof(struct thread_options, verifysort_nr),
.help = "Pre-load and sort verify blocks for a read workload",
.minval = 0,
.maxval = 131072,
.name = "verify_interval",
.lname = "Verify interval",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(verify_interval),
+ .off1 = offsetof(struct thread_options, verify_interval),
.minval = 2 * sizeof(struct verify_header),
.help = "Store verify buffer header every N bytes",
.parent = "verify",
.lname = "Verify offset",
.type = FIO_OPT_INT,
.help = "Offset verify header location by N bytes",
- .off1 = td_var_offset(verify_offset),
+ .off1 = offsetof(struct thread_options, verify_offset),
.minval = sizeof(struct verify_header),
.parent = "verify",
.hide = 1,
.lname = "Verify pattern",
.type = FIO_OPT_STR,
.cb = str_verify_pattern_cb,
- .off1 = td_var_offset(verify_pattern),
+ .off1 = offsetof(struct thread_options, verify_pattern),
.help = "Fill pattern for IO buffers",
.parent = "verify",
.hide = 1,
.name = "verify_fatal",
.lname = "Verify fatal",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(verify_fatal),
+ .off1 = offsetof(struct thread_options, verify_fatal),
.def = "0",
.help = "Exit on a single verify failure, don't continue",
.parent = "verify",
.name = "verify_dump",
.lname = "Verify dump",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(verify_dump),
+ .off1 = offsetof(struct thread_options, verify_dump),
.def = "0",
.help = "Dump contents of good and bad blocks on failure",
.parent = "verify",
.name = "verify_async",
.lname = "Verify asynchronously",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(verify_async),
+ .off1 = offsetof(struct thread_options, verify_async),
.def = "0",
.help = "Number of async verifier threads to use",
.parent = "verify",
.name = "verify_backlog",
.lname = "Verify backlog",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(verify_backlog),
+ .off1 = offsetof(struct thread_options, verify_backlog),
.help = "Verify after this number of blocks are written",
.parent = "verify",
.hide = 1,
.name = "verify_backlog_batch",
.lname = "Verify backlog batch",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(verify_batch),
+ .off1 = offsetof(struct thread_options, verify_batch),
.help = "Verify this number of IO blocks",
.parent = "verify",
.hide = 1,
.lname = "Async verify CPUs",
.type = FIO_OPT_STR,
.cb = str_verify_cpus_allowed_cb,
- .off1 = td_var_offset(verify_cpumask),
+ .off1 = offsetof(struct thread_options, verify_cpumask),
.help = "Set CPUs allowed for async verify threads",
.parent = "verify_async",
.hide = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_VERIFY,
},
+#else
+ {
+ .name = "verify_async_cpus",
+ .lname = "Async verify CPUs",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support CPU affinities",
+ },
#endif
{
.name = "experimental_verify",
- .off1 = td_var_offset(experimental_verify),
+ .lname = "Experimental Verify",
+ .off1 = offsetof(struct thread_options, experimental_verify),
.type = FIO_OPT_BOOL,
.help = "Enable experimental verification",
.parent = "verify",
{
.name = "verify_state_load",
.lname = "Load verify state",
- .off1 = td_var_offset(verify_state),
+ .off1 = offsetof(struct thread_options, verify_state),
.type = FIO_OPT_BOOL,
.help = "Load verify termination state",
.parent = "verify",
{
.name = "verify_state_save",
.lname = "Save verify state",
- .off1 = td_var_offset(verify_state_save),
+ .off1 = offsetof(struct thread_options, verify_state_save),
.type = FIO_OPT_BOOL,
.def = "1",
.help = "Save verify state on termination",
.name = "trim_percentage",
.lname = "Trim percentage",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(trim_percentage),
+ .off1 = offsetof(struct thread_options, trim_percentage),
.minval = 0,
.maxval = 100,
.help = "Number of verify blocks to discard/trim",
.lname = "Verify trim zero",
.type = FIO_OPT_BOOL,
.help = "Verify that trim/discarded blocks are returned as zeroes",
- .off1 = td_var_offset(trim_zero),
+ .off1 = offsetof(struct thread_options, trim_zero),
.parent = "trim_percentage",
.hide = 1,
.def = "1",
.name = "trim_backlog",
.lname = "Trim backlog",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(trim_backlog),
+ .off1 = offsetof(struct thread_options, trim_backlog),
.help = "Trim after this number of blocks are written",
.parent = "trim_percentage",
.hide = 1,
.name = "trim_backlog_batch",
.lname = "Trim backlog batch",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(trim_batch),
+ .off1 = offsetof(struct thread_options, trim_batch),
.help = "Trim this number of IO blocks",
.parent = "trim_percentage",
.hide = 1,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_TRIM,
},
+#else
+ {
+ .name = "trim_percentage",
+ .lname = "Trim percentage",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Fio does not support TRIM on your platform",
+ },
+ {
+ .name = "trim_verify_zero",
+ .lname = "Verify trim zero",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Fio does not support TRIM on your platform",
+ },
+ {
+ .name = "trim_backlog",
+ .lname = "Trim backlog",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Fio does not support TRIM on your platform",
+ },
+ {
+ .name = "trim_backlog_batch",
+ .lname = "Trim backlog batch",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Fio does not support TRIM on your platform",
+ },
#endif
{
.name = "write_iolog",
.lname = "Write I/O log",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(write_iolog_file),
+ .off1 = offsetof(struct thread_options, write_iolog_file),
.help = "Store IO pattern to file",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IOLOG,
.name = "read_iolog",
.lname = "Read I/O log",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(read_iolog_file),
+ .off1 = offsetof(struct thread_options, read_iolog_file),
.help = "Playback IO pattern from file",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IOLOG,
.name = "replay_no_stall",
.lname = "Don't stall on replay",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(no_stall),
+ .off1 = offsetof(struct thread_options, no_stall),
.def = "0",
.parent = "read_iolog",
.hide = 1,
.name = "replay_redirect",
.lname = "Redirect device for replay",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(replay_redirect),
+ .off1 = offsetof(struct thread_options, replay_redirect),
.parent = "read_iolog",
.hide = 1,
.help = "Replay all I/O onto this device, regardless of trace device",
.name = "replay_scale",
.lname = "Replace offset scale factor",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(replay_scale),
+ .off1 = offsetof(struct thread_options, replay_scale),
.parent = "read_iolog",
.def = "1",
.help = "Align offsets to this blocksize",
.name = "replay_align",
.lname = "Replace alignment",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(replay_align),
+ .off1 = offsetof(struct thread_options, replay_align),
.parent = "read_iolog",
.help = "Scale offset down by this factor",
.category = FIO_OPT_C_IO,
.name = "exec_prerun",
.lname = "Pre-execute runnable",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(exec_prerun),
+ .off1 = offsetof(struct thread_options, exec_prerun),
.help = "Execute this file prior to running job",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
.name = "exec_postrun",
.lname = "Post-execute runnable",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(exec_postrun),
+ .off1 = offsetof(struct thread_options, exec_postrun),
.help = "Execute this file after running job",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
.name = "ioscheduler",
.lname = "I/O scheduler",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(ioscheduler),
+ .off1 = offsetof(struct thread_options, ioscheduler),
.help = "Use this IO scheduler on the backing device",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
+#else
+ {
+ .name = "ioscheduler",
+ .lname = "I/O scheduler",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support IO scheduler switching",
+ },
#endif
{
.name = "zonesize",
.lname = "Zone size",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(zone_size),
+ .off1 = offsetof(struct thread_options, zone_size),
.help = "Amount of data to read per zone",
.def = "0",
.interval = 1024 * 1024,
.name = "zonerange",
.lname = "Zone range",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(zone_range),
+ .off1 = offsetof(struct thread_options, zone_range),
.help = "Give size of an IO zone",
.def = "0",
.interval = 1024 * 1024,
.name = "zoneskip",
.lname = "Zone skip",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(zone_skip),
+ .off1 = offsetof(struct thread_options, zone_skip),
.help = "Space between IO zones",
.def = "0",
.interval = 1024 * 1024,
.name = "lockmem",
.lname = "Lock memory",
.type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(lockmem),
+ .off1 = offsetof(struct thread_options, lockmem),
.help = "Lock down this amount of memory (per worker)",
.def = "0",
.interval = 1024 * 1024,
.lname = "Read/write mix read",
.type = FIO_OPT_INT,
.cb = str_rwmix_read_cb,
- .off1 = td_var_offset(rwmix[DDIR_READ]),
+ .off1 = offsetof(struct thread_options, rwmix[DDIR_READ]),
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
.lname = "Read/write mix write",
.type = FIO_OPT_INT,
.cb = str_rwmix_write_cb,
- .off1 = td_var_offset(rwmix[DDIR_WRITE]),
+ .off1 = offsetof(struct thread_options, rwmix[DDIR_WRITE]),
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",
.name = "nice",
.lname = "Nice",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(nice),
+ .off1 = offsetof(struct thread_options, nice),
.help = "Set job CPU nice value",
.minval = -19,
.maxval = 20,
.name = "prio",
.lname = "I/O nice priority",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(ioprio),
+ .off1 = offsetof(struct thread_options, ioprio),
.help = "Set job IO priority value",
- .minval = 0,
- .maxval = 7,
+ .minval = IOPRIO_MIN_PRIO,
+ .maxval = IOPRIO_MAX_PRIO,
.interval = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
},
+#else
+ {
+ .name = "prio",
+ .lname = "I/O nice priority",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support IO priorities",
+ },
+#endif
+#ifdef FIO_HAVE_IOPRIO_CLASS
+#ifndef FIO_HAVE_IOPRIO
+#error "FIO_HAVE_IOPRIO_CLASS requires FIO_HAVE_IOPRIO"
+#endif
{
.name = "prioclass",
.lname = "I/O nice priority class",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(ioprio_class),
+ .off1 = offsetof(struct thread_options, ioprio_class),
.help = "Set job IO priority class",
- .minval = 0,
- .maxval = 3,
+ .minval = IOPRIO_MIN_PRIO_CLASS,
+ .maxval = IOPRIO_MAX_PRIO_CLASS,
.interval = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
},
+#else
+ {
+ .name = "prioclass",
+ .lname = "I/O nice priority class",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support IO priority classes",
+ },
#endif
{
.name = "thinktime",
.lname = "Thinktime",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(thinktime),
+ .off1 = offsetof(struct thread_options, thinktime),
.help = "Idle time between IO buffers (usec)",
.def = "0",
.is_time = 1,
.name = "thinktime_spin",
.lname = "Thinktime spin",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(thinktime_spin),
+ .off1 = offsetof(struct thread_options, thinktime_spin),
.help = "Start think time by spinning this amount (usec)",
.def = "0",
.is_time = 1,
.name = "thinktime_blocks",
.lname = "Thinktime blocks",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(thinktime_blocks),
+ .off1 = offsetof(struct thread_options, thinktime_blocks),
.help = "IO buffer period between 'thinktime'",
.def = "1",
.parent = "thinktime",
.name = "rate",
.lname = "I/O rate",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate[DDIR_READ]),
- .off2 = td_var_offset(rate[DDIR_WRITE]),
- .off3 = td_var_offset(rate[DDIR_TRIM]),
+ .off1 = offsetof(struct thread_options, rate[DDIR_READ]),
+ .off2 = offsetof(struct thread_options, rate[DDIR_WRITE]),
+ .off3 = offsetof(struct thread_options, rate[DDIR_TRIM]),
.help = "Set bandwidth rate",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RATE,
},
{
- .name = "ratemin",
+ .name = "rate_min",
+ .alias = "ratemin",
.lname = "I/O min rate",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(ratemin[DDIR_READ]),
- .off2 = td_var_offset(ratemin[DDIR_WRITE]),
- .off3 = td_var_offset(ratemin[DDIR_TRIM]),
+ .off1 = offsetof(struct thread_options, ratemin[DDIR_READ]),
+ .off2 = offsetof(struct thread_options, ratemin[DDIR_WRITE]),
+ .off3 = offsetof(struct thread_options, ratemin[DDIR_TRIM]),
.help = "Job must meet this rate or it will be shutdown",
.parent = "rate",
.hide = 1,
.name = "rate_iops",
.lname = "I/O rate IOPS",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate_iops[DDIR_READ]),
- .off2 = td_var_offset(rate_iops[DDIR_WRITE]),
- .off3 = td_var_offset(rate_iops[DDIR_TRIM]),
+ .off1 = offsetof(struct thread_options, rate_iops[DDIR_READ]),
+ .off2 = offsetof(struct thread_options, rate_iops[DDIR_WRITE]),
+ .off3 = offsetof(struct thread_options, rate_iops[DDIR_TRIM]),
.help = "Limit IO used to this number of IO operations/sec",
.hide = 1,
.category = FIO_OPT_C_IO,
.name = "rate_iops_min",
.lname = "I/O min rate IOPS",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate_iops_min[DDIR_READ]),
- .off2 = td_var_offset(rate_iops_min[DDIR_WRITE]),
- .off3 = td_var_offset(rate_iops_min[DDIR_TRIM]),
+ .off1 = offsetof(struct thread_options, rate_iops_min[DDIR_READ]),
+ .off2 = offsetof(struct thread_options, rate_iops_min[DDIR_WRITE]),
+ .off3 = offsetof(struct thread_options, rate_iops_min[DDIR_TRIM]),
.help = "Job must meet this rate or it will be shut down",
.parent = "rate_iops",
.hide = 1,
.group = FIO_OPT_G_RATE,
},
{
- .name = "ratecycle",
+ .name = "rate_process",
+ .lname = "Rate Process",
+ .type = FIO_OPT_STR,
+ .off1 = offsetof(struct thread_options, rate_process),
+ .help = "What process controls how rated IO is managed",
+ .def = "linear",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
+ .posval = {
+ { .ival = "linear",
+ .oval = RATE_PROCESS_LINEAR,
+ .help = "Linear rate of IO",
+ },
+ {
+ .ival = "poisson",
+ .oval = RATE_PROCESS_POISSON,
+ .help = "Rate follows Poisson process",
+ },
+ },
+ .parent = "rate",
+ },
+ {
+ .name = "rate_cycle",
+ .alias = "ratecycle",
.lname = "I/O rate cycle",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(ratecycle),
+ .off1 = offsetof(struct thread_options, ratecycle),
.help = "Window average for rate limits (msec)",
.def = "1000",
.parent = "rate",
},
{
.name = "max_latency",
+ .lname = "Max Latency",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(max_latency),
+ .off1 = offsetof(struct thread_options, max_latency),
.help = "Maximum tolerated IO latency (usec)",
.is_time = 1,
.category = FIO_OPT_C_IO,
.name = "latency_target",
.lname = "Latency Target (usec)",
.type = FIO_OPT_STR_VAL_TIME,
- .off1 = td_var_offset(latency_target),
+ .off1 = offsetof(struct thread_options, latency_target),
.help = "Ramp to max queue depth supporting this latency",
.is_time = 1,
.category = FIO_OPT_C_IO,
.name = "latency_window",
.lname = "Latency Window (usec)",
.type = FIO_OPT_STR_VAL_TIME,
- .off1 = td_var_offset(latency_window),
+ .off1 = offsetof(struct thread_options, latency_window),
.help = "Time to sustain latency_target",
.is_time = 1,
.category = FIO_OPT_C_IO,
.name = "latency_percentile",
.lname = "Latency Percentile",
.type = FIO_OPT_FLOAT_LIST,
- .off1 = td_var_offset(latency_percentile),
+ .off1 = offsetof(struct thread_options, latency_percentile),
.help = "Percentile of IOs must be below latency_target",
.def = "100",
.maxlen = 1,
.name = "invalidate",
.lname = "Cache invalidate",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(invalidate_cache),
+ .off1 = offsetof(struct thread_options, invalidate_cache),
.help = "Invalidate buffer/page cache prior to running job",
.def = "1",
.category = FIO_OPT_C_IO,
.name = "sync",
.lname = "Synchronous I/O",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(sync_io),
+ .off1 = offsetof(struct thread_options, sync_io),
.help = "Use O_SYNC for buffered writes",
.def = "0",
.parent = "buffered",
.name = "create_serialize",
.lname = "Create serialize",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(create_serialize),
- .help = "Serialize creating of job files",
+ .off1 = offsetof(struct thread_options, create_serialize),
+ .help = "Serialize creation of job files",
.def = "1",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
.name = "create_fsync",
.lname = "Create fsync",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(create_fsync),
+ .off1 = offsetof(struct thread_options, create_fsync),
.help = "fsync file after creation",
.def = "1",
.category = FIO_OPT_C_FILE,
.name = "create_on_open",
.lname = "Create on open",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(create_on_open),
+ .off1 = offsetof(struct thread_options, create_on_open),
.help = "Create files when they are opened for IO",
.def = "0",
.category = FIO_OPT_C_FILE,
},
{
.name = "create_only",
+ .lname = "Create Only",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(create_only),
+ .off1 = offsetof(struct thread_options, create_only),
.help = "Only perform file creation phase",
.category = FIO_OPT_C_FILE,
.def = "0",
.name = "allow_file_create",
.lname = "Allow file create",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(allow_create),
+ .off1 = offsetof(struct thread_options, allow_create),
.help = "Permit fio to create files, if they don't exist",
.def = "1",
.category = FIO_OPT_C_FILE,
.name = "allow_mounted_write",
.lname = "Allow mounted write",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(allow_mounted_write),
+ .off1 = offsetof(struct thread_options, allow_mounted_write),
.help = "Allow writes to a mounted partition",
.def = "0",
.category = FIO_OPT_C_FILE,
.name = "pre_read",
.lname = "Pre-read files",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(pre_read),
+ .off1 = offsetof(struct thread_options, pre_read),
.help = "Pre-read files before starting official testing",
.def = "0",
.category = FIO_OPT_C_FILE,
.lname = "CPU mask",
.type = FIO_OPT_INT,
.cb = str_cpumask_cb,
- .off1 = td_var_offset(cpumask),
+ .off1 = offsetof(struct thread_options, cpumask),
.help = "CPU affinity mask",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
.lname = "CPUs allowed",
.type = FIO_OPT_STR,
.cb = str_cpus_allowed_cb,
- .off1 = td_var_offset(cpumask),
+ .off1 = offsetof(struct thread_options, cpumask),
.help = "Set CPUs allowed",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
.name = "cpus_allowed_policy",
.lname = "CPUs allowed distribution policy",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(cpus_allowed_policy),
+ .off1 = offsetof(struct thread_options, cpus_allowed_policy),
.help = "Distribution policy for cpus_allowed",
.parent = "cpus_allowed",
.prio = 1,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
},
+#else
+ {
+ .name = "cpumask",
+ .lname = "CPU mask",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support CPU affinities",
+ },
+ {
+ .name = "cpus_allowed",
+ .lname = "CPUs allowed",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support CPU affinities",
+ },
+ {
+ .name = "cpus_allowed_policy",
+ .lname = "CPUs allowed distribution policy",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support CPU affinities",
+ },
#endif
#ifdef CONFIG_LIBNUMA
{
.name = "numa_cpu_nodes",
+ .lname = "NUMA CPU Nodes",
.type = FIO_OPT_STR,
.cb = str_numa_cpunodes_cb,
- .off1 = td_var_offset(numa_cpunodes),
+ .off1 = offsetof(struct thread_options, numa_cpunodes),
.help = "NUMA CPU nodes bind",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
},
{
.name = "numa_mem_policy",
+ .lname = "NUMA Memory Policy",
.type = FIO_OPT_STR,
.cb = str_numa_mpol_cb,
- .off1 = td_var_offset(numa_memnodes),
+ .off1 = offsetof(struct thread_options, numa_memnodes),
.help = "NUMA memory policy setup",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
},
+#else
+ {
+ .name = "numa_cpu_nodes",
+ .lname = "NUMA CPU Nodes",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Build fio with libnuma-dev(el) to enable this option",
+ },
+ {
+ .name = "numa_mem_policy",
+ .lname = "NUMA Memory Policy",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Build fio with libnuma-dev(el) to enable this option",
+ },
#endif
{
.name = "end_fsync",
.lname = "End fsync",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(end_fsync),
+ .off1 = offsetof(struct thread_options, end_fsync),
.help = "Include fsync at the end of job",
.def = "0",
.category = FIO_OPT_C_FILE,
.name = "fsync_on_close",
.lname = "Fsync on close",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(fsync_on_close),
+ .off1 = offsetof(struct thread_options, fsync_on_close),
.help = "fsync files on close",
.def = "0",
.category = FIO_OPT_C_FILE,
.name = "unlink",
.lname = "Unlink file",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(unlink),
+ .off1 = offsetof(struct thread_options, unlink),
.help = "Unlink created files after job has completed",
.def = "0",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "unlink_each_loop",
+ .lname = "Unlink file after each loop of a job",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, unlink_each_loop),
+ .help = "Unlink created files after each loop in a job has completed",
+ .def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "exitall",
.lname = "Exit-all on terminate",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_PROCESS,
},
+ {
+ .name = "exitall_on_error",
+ .lname = "Exit-all on terminate in error",
+ .type = FIO_OPT_STR_SET,
+ .off1 = offsetof(struct thread_options, exitall_error),
+ .help = "Terminate all jobs when one exits in error",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
+ },
{
.name = "stonewall",
.lname = "Wait for previous",
.alias = "wait_for_previous",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(stonewall),
+ .off1 = offsetof(struct thread_options, stonewall),
.help = "Insert a hard barrier between this job and previous",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_PROCESS,
.name = "new_group",
.lname = "New group",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(new_group),
+ .off1 = offsetof(struct thread_options, new_group),
.help = "Mark the start of a new group (for reporting)",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_PROCESS,
.name = "thread",
.lname = "Thread",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(use_thread),
+ .off1 = offsetof(struct thread_options, use_thread),
.help = "Use threads instead of processes",
#ifdef CONFIG_NO_SHM
.def = "1",
},
{
.name = "per_job_logs",
+ .lname = "Per Job Logs",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(per_job_logs),
+ .off1 = offsetof(struct thread_options, per_job_logs),
.help = "Include job number in generated log files or not",
.def = "1",
.category = FIO_OPT_C_LOG,
.name = "write_bw_log",
.lname = "Write bandwidth log",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(bw_log_file),
+ .off1 = offsetof(struct thread_options, bw_log_file),
.help = "Write log of bandwidth during run",
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
.name = "write_lat_log",
.lname = "Write latency log",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(lat_log_file),
+ .off1 = offsetof(struct thread_options, lat_log_file),
.help = "Write log of latency during run",
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
.name = "write_iops_log",
.lname = "Write IOPS log",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(iops_log_file),
+ .off1 = offsetof(struct thread_options, iops_log_file),
.help = "Write log of IOPS during run",
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
.name = "log_avg_msec",
.lname = "Log averaging (msec)",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(log_avg_msec),
+ .off1 = offsetof(struct thread_options, log_avg_msec),
.help = "Average bw/iops/lat logs over this period of time",
.def = "0",
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "log_hist_msec",
+ .lname = "Log histograms (msec)",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct thread_options, log_hist_msec),
+ .help = "Dump completion latency histograms at frequency of this time value",
+ .def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "log_hist_coarseness",
+ .lname = "Histogram logs coarseness",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct thread_options, log_hist_coarseness),
+ .help = "Integer in range [0,6]. Higher coarseness outputs"
+ " fewer histogram bins per sample. The number of bins for"
+ " these are [1216, 608, 304, 152, 76, 38, 19] respectively.",
+ .def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "write_hist_log",
+ .lname = "Write latency histogram logs",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = offsetof(struct thread_options, hist_log_file),
+ .help = "Write log of latency histograms during run",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "log_max_value",
+ .lname = "Log maximum instead of average",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, log_max),
+ .help = "Log max sample in a window instead of average",
+ .def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "log_offset",
.lname = "Log offset of IO",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(log_offset),
+ .off1 = offsetof(struct thread_options, log_offset),
.help = "Include offset of IO for each log entry",
.def = "0",
.category = FIO_OPT_C_LOG,
.name = "log_compression",
.lname = "Log compression",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(log_gz),
+ .off1 = offsetof(struct thread_options, log_gz),
.help = "Log in compressed chunks of this size",
- .minval = 32 * 1024 * 1024ULL,
+ .minval = 1024ULL,
.maxval = 512 * 1024 * 1024ULL,
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
},
+#ifdef FIO_HAVE_CPU_AFFINITY
+ {
+ .name = "log_compression_cpus",
+ .lname = "Log Compression CPUs",
+ .type = FIO_OPT_STR,
+ .cb = str_log_cpus_allowed_cb,
+ .off1 = offsetof(struct thread_options, log_gz_cpumask),
+ .parent = "log_compression",
+ .help = "Limit log compression to these CPUs",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+#else
+ {
+ .name = "log_compression_cpus",
+ .lname = "Log Compression CPUs",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support CPU affinities",
+ },
+#endif
{
.name = "log_store_compressed",
.lname = "Log store compressed",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(log_gz_store),
+ .off1 = offsetof(struct thread_options, log_gz_store),
.help = "Store logs in a compressed format",
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
},
+#else
+ {
+ .name = "log_compression",
+ .lname = "Log compression",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Install libz-dev(el) to get compression support",
+ },
+ {
+ .name = "log_store_compressed",
+ .lname = "Log store compressed",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Install libz-dev(el) to get compression support",
+ },
#endif
{
.name = "block_error_percentiles",
.lname = "Block error percentiles",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(block_error_hist),
+ .off1 = offsetof(struct thread_options, block_error_hist),
.help = "Record trim block errors and make a histogram",
.def = "0",
.category = FIO_OPT_C_LOG,
.name = "bwavgtime",
.lname = "Bandwidth average time",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(bw_avg_time),
+ .off1 = offsetof(struct thread_options, bw_avg_time),
.help = "Time window over which to calculate bandwidth"
" (msec)",
.def = "500",
.name = "iopsavgtime",
.lname = "IOPS average time",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(iops_avg_time),
+ .off1 = offsetof(struct thread_options, iops_avg_time),
.help = "Time window over which to calculate IOPS (msec)",
.def = "500",
.parent = "write_iops_log",
.name = "group_reporting",
.lname = "Group reporting",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(group_reporting),
+ .off1 = offsetof(struct thread_options, group_reporting),
.help = "Do reporting on a per-group basis",
.category = FIO_OPT_C_STAT,
.group = FIO_OPT_G_INVALID,
.name = "zero_buffers",
.lname = "Zero I/O buffers",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(zero_buffers),
+ .off1 = offsetof(struct thread_options, zero_buffers),
.help = "Init IO buffers to all zeroes",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BUF,
.name = "refill_buffers",
.lname = "Refill I/O buffers",
.type = FIO_OPT_STR_SET,
- .off1 = td_var_offset(refill_buffers),
+ .off1 = offsetof(struct thread_options, refill_buffers),
.help = "Refill IO buffers on every IO submit",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BUF,
.name = "scramble_buffers",
.lname = "Scramble I/O buffers",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(scramble_buffers),
+ .off1 = offsetof(struct thread_options, scramble_buffers),
.help = "Slightly scramble buffers on every IO submit",
.def = "1",
.category = FIO_OPT_C_IO,
.lname = "Buffer pattern",
.type = FIO_OPT_STR,
.cb = str_buffer_pattern_cb,
- .off1 = td_var_offset(buffer_pattern),
+ .off1 = offsetof(struct thread_options, buffer_pattern),
.help = "Fill pattern for IO buffers",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BUF,
.lname = "Buffer compression percentage",
.type = FIO_OPT_INT,
.cb = str_buffer_compress_cb,
- .off1 = td_var_offset(compress_percentage),
+ .off1 = offsetof(struct thread_options, compress_percentage),
.maxval = 100,
.minval = 0,
.help = "How compressible the buffer is (approximately)",
.name = "buffer_compress_chunk",
.lname = "Buffer compression chunk size",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(compress_chunk),
+ .off1 = offsetof(struct thread_options, compress_chunk),
.parent = "buffer_compress_percentage",
.hide = 1,
.help = "Size of compressible region in buffer",
.lname = "Dedupe percentage",
.type = FIO_OPT_INT,
.cb = str_dedupe_cb,
- .off1 = td_var_offset(dedupe_percentage),
+ .off1 = offsetof(struct thread_options, dedupe_percentage),
.maxval = 100,
.minval = 0,
.help = "Percentage of buffers that are dedupable",
.name = "clat_percentiles",
.lname = "Completion latency percentiles",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(clat_percentiles),
+ .off1 = offsetof(struct thread_options, clat_percentiles),
.help = "Enable the reporting of completion latency percentiles",
.def = "1",
.category = FIO_OPT_C_STAT,
.name = "percentile_list",
.lname = "Percentile list",
.type = FIO_OPT_FLOAT_LIST,
- .off1 = td_var_offset(percentile_list),
- .off2 = td_var_offset(percentile_precision),
+ .off1 = offsetof(struct thread_options, percentile_list),
+ .off2 = offsetof(struct thread_options, percentile_precision),
.help = "Specify a custom list of percentiles to report for "
"completion latency and block errors",
.def = "1:5:10:20:30:40:50:60:70:80:90:95:99:99.5:99.9:99.95:99.99",
.name = "disk_util",
.lname = "Disk utilization",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(do_disk_util),
+ .off1 = offsetof(struct thread_options, do_disk_util),
.help = "Log disk utilization statistics",
.def = "1",
.category = FIO_OPT_C_STAT,
.group = FIO_OPT_G_INVALID,
},
+#else
+ {
+ .name = "disk_util",
+ .lname = "Disk utilization",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support disk utilization",
+ },
#endif
{
.name = "gtod_reduce",
.name = "disable_lat",
.lname = "Disable all latency stats",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(disable_lat),
+ .off1 = offsetof(struct thread_options, disable_lat),
.help = "Disable latency numbers",
.parent = "gtod_reduce",
.hide = 1,
.name = "disable_clat",
.lname = "Disable completion latency stats",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(disable_clat),
+ .off1 = offsetof(struct thread_options, disable_clat),
.help = "Disable completion latency numbers",
.parent = "gtod_reduce",
.hide = 1,
.name = "disable_slat",
.lname = "Disable submission latency stats",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(disable_slat),
+ .off1 = offsetof(struct thread_options, disable_slat),
.help = "Disable submission latency numbers",
.parent = "gtod_reduce",
.hide = 1,
.name = "disable_bw_measurement",
.lname = "Disable bandwidth stats",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(disable_bw),
+ .off1 = offsetof(struct thread_options, disable_bw),
.help = "Disable bandwidth logging",
.parent = "gtod_reduce",
.hide = 1,
.name = "gtod_cpu",
.lname = "Dedicated gettimeofday() CPU",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(gtod_cpu),
+ .off1 = offsetof(struct thread_options, gtod_cpu),
.help = "Set up dedicated gettimeofday() thread on this CPU",
.verify = gtod_cpu_verify,
.category = FIO_OPT_C_GENERAL,
},
{
.name = "unified_rw_reporting",
+ .lname = "Unified RW Reporting",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(unified_rw_rep),
+ .off1 = offsetof(struct thread_options, unified_rw_rep),
.help = "Unify reporting across data direction",
.def = "0",
.category = FIO_OPT_C_GENERAL,
.name = "continue_on_error",
.lname = "Continue on error",
.type = FIO_OPT_STR,
- .off1 = td_var_offset(continue_on_error),
+ .off1 = offsetof(struct thread_options, continue_on_error),
.help = "Continue on non-fatal errors during IO",
.def = "none",
.category = FIO_OPT_C_GENERAL,
},
{
.name = "ignore_error",
+ .lname = "Ignore Error",
.type = FIO_OPT_STR,
.cb = str_ignore_error_cb,
- .off1 = td_var_offset(ignore_error_nr),
+ .off1 = offsetof(struct thread_options, ignore_error_nr),
.help = "Set a specific list of errors to ignore",
.parent = "rw",
.category = FIO_OPT_C_GENERAL,
},
{
.name = "error_dump",
+ .lname = "Error Dump",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(error_dump),
+ .off1 = offsetof(struct thread_options, error_dump),
.def = "0",
.help = "Dump info on each error",
.category = FIO_OPT_C_GENERAL,
.name = "profile",
.lname = "Profile",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(profile),
+ .off1 = offsetof(struct thread_options, profile),
.help = "Select a specific builtin performance test",
.category = FIO_OPT_C_PROFILE,
.group = FIO_OPT_G_INVALID,
.name = "cgroup",
.lname = "Cgroup",
.type = FIO_OPT_STR_STORE,
- .off1 = td_var_offset(cgroup),
+ .off1 = offsetof(struct thread_options, cgroup),
.help = "Add job to cgroup of this name",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CGROUP,
.name = "cgroup_nodelete",
.lname = "Cgroup no-delete",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(cgroup_nodelete),
+ .off1 = offsetof(struct thread_options, cgroup_nodelete),
.help = "Do not delete cgroups after job completion",
.def = "0",
.parent = "cgroup",
.name = "cgroup_weight",
.lname = "Cgroup weight",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(cgroup_weight),
+ .off1 = offsetof(struct thread_options, cgroup_weight),
.help = "Use given weight for cgroup",
.minval = 100,
.maxval = 1000,
.name = "uid",
.lname = "User ID",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(uid),
+ .off1 = offsetof(struct thread_options, uid),
.help = "Run job with this user ID",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
.name = "gid",
.lname = "Group ID",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(gid),
+ .off1 = offsetof(struct thread_options, gid),
.help = "Run job with this group ID",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
.name = "kb_base",
.lname = "KB Base",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(kb_base),
+ .off1 = offsetof(struct thread_options, kb_base),
.prio = 1,
.def = "1024",
.posval = {
.name = "unit_base",
.lname = "Base unit for reporting (Bits or Bytes)",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(unit_base),
+ .off1 = offsetof(struct thread_options, unit_base),
.prio = 1,
.posval = {
{ .ival = "0",
.name = "hugepage-size",
.lname = "Hugepage size",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(hugepage_size),
+ .off1 = offsetof(struct thread_options, hugepage_size),
.help = "When using hugepages, specify size of each page",
.def = __fio_stringify(FIO_HUGE_PAGE),
.interval = 1024 * 1024,
.name = "flow_id",
.lname = "I/O flow ID",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(flow_id),
+ .off1 = offsetof(struct thread_options, flow_id),
.help = "The flow index ID to use",
.def = "0",
.category = FIO_OPT_C_IO,
.name = "flow",
.lname = "I/O flow weight",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(flow),
+ .off1 = offsetof(struct thread_options, flow),
.help = "Weight for flow control of this job",
.parent = "flow_id",
.hide = 1,
.name = "flow_watermark",
.lname = "I/O flow watermark",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(flow_watermark),
+ .off1 = offsetof(struct thread_options, flow_watermark),
.help = "High watermark for flow control. This option"
" should be set to the same value for all threads"
" with non-zero flow.",
.name = "flow_sleep",
.lname = "I/O flow sleep",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(flow_sleep),
+ .off1 = offsetof(struct thread_options, flow_sleep),
.help = "How many microseconds to sleep after being held"
" back by the flow control mechanism",
.parent = "flow_id",
.name = "skip_bad",
.lname = "Skip operations against bad blocks",
.type = FIO_OPT_BOOL,
- .off1 = td_var_offset(skip_bad),
+ .off1 = offsetof(struct thread_options, skip_bad),
.help = "Skip operations against known bad blocks.",
.hide = 1,
.def = "0",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_MTD,
},
+ {
+ .name = "steadystate",
+ .lname = "Steady state threshold",
+ .alias = "ss",
+ .type = FIO_OPT_STR,
+ .off1 = offsetof(struct thread_options, ss),
+ .cb = str_steadystate_cb,
+ .help = "Define the criterion and limit to judge when a job has reached steady state",
+ .def = "iops_slope:0.01%",
+ .posval = {
+ { .ival = "iops",
+ .oval = FIO_STEADYSTATE_IOPS,
+ .help = "maximum mean deviation of IOPS measurements",
+ },
+ { .ival = "iops_slope",
+ .oval = FIO_STEADYSTATE_IOPS_SLOPE,
+ .help = "slope calculated from IOPS measurements",
+ },
+ { .ival = "bw",
+ .oval = FIO_STEADYSTATE_BW,
+ .help = "maximum mean deviation of bandwidth measurements",
+ },
+ {
+ .ival = "bw_slope",
+ .oval = FIO_STEADYSTATE_BW_SLOPE,
+ .help = "slope calculated from bandwidth measurements",
+ },
+ },
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
+ },
+ {
+ .name = "steadystate_duration",
+ .lname = "Steady state duration",
+ .alias = "ss_dur",
+ .type = FIO_OPT_STR_VAL_TIME,
+ .off1 = offsetof(struct thread_options, ss_dur),
+ .help = "Stop workload upon attaining steady state for specified duration",
+ .def = "0",
+ .is_seconds = 1,
+ .is_time = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
+ },
+ {
+ .name = "steadystate_ramp_time",
+ .lname = "Steady state ramp time",
+ .alias = "ss_ramp",
+ .type = FIO_OPT_STR_VAL_TIME,
+ .off1 = offsetof(struct thread_options, ss_ramp_time),
+ .help = "Delay before initiation of data collection for steady state job termination testing",
+ .def = "0",
+ .is_seconds = 1,
+ .is_time = 1,
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_RUNTIME,
+ },
{
.name = NULL,
},
i++;
}
- if (best_option != -1 && string_distance_ok(name, best_distance))
+ if (best_option != -1 && string_distance_ok(name, best_distance) &&
+ fio_options[best_option].type != FIO_OPT_UNSUPPORTED)
log_err("Did you mean %s?\n", fio_options[best_option].name);
free(name);
}
-int fio_options_parse(struct thread_data *td, char **opts, int num_opts,
- int dump_cmdline)
+int fio_options_parse(struct thread_data *td, char **opts, int num_opts)
{
int i, ret, unknown;
char **opts_copy;
for (ret = 0, i = 0, unknown = 0; i < num_opts; i++) {
struct fio_option *o;
int newret = parse_option(opts_copy[i], opts[i], fio_options,
- &o, td, dump_cmdline);
+ &o, &td->o, &td->opt_list);
if (!newret && o)
fio_option_mark_set(&td->o, o);
if (td->eo)
newret = parse_option(opts_copy[i], opts[i],
td->io_ops->options, &o,
- td->eo, dump_cmdline);
+ td->eo, &td->opt_list);
ret |= newret;
if (!o) {
{
int ret;
- ret = parse_cmd_option(opt, val, fio_options, td);
+ ret = parse_cmd_option(opt, val, fio_options, &td->o, &td->opt_list);
if (!ret) {
struct fio_option *o;
int fio_cmd_ioengine_option_parse(struct thread_data *td, const char *opt,
char *val)
{
- return parse_cmd_option(opt, val, td->io_ops->options, td->eo);
+ return parse_cmd_option(opt, val, td->io_ops->options, td->eo,
+ &td->opt_list);
}
void fio_fill_default_options(struct thread_data *td)
{
td->o.magic = OPT_MAGIC;
- fill_default_options(td, fio_options);
+ fill_default_options(&td->o, fio_options);
}
int fio_show_option_help(const char *opt)
unsigned int fio_get_kb_base(void *data)
{
- struct thread_options *o = data;
+ struct thread_data *td = cb_data_to_td(data);
+ struct thread_options *o = &td->o;
unsigned int kb_base = 0;
/*
return (o->set_options[index] & ((uint64_t)1 << offset)) != 0;
}
-int __fio_option_is_set(struct thread_options *o, unsigned int off1)
+bool __fio_option_is_set(struct thread_options *o, unsigned int off1)
{
struct fio_option *opt, *next;
next = NULL;
while ((opt = find_next_opt(o, next, off1)) != NULL) {
if (opt_is_set(o, opt))
- return 1;
+ return true;
next = opt;
}
- return 0;
+ return false;
}
void fio_option_mark_set(struct thread_options *o, struct fio_option *opt)