#include <ctype.h>
#include <string.h>
#include <assert.h>
-#include <libgen.h>
-#include <fcntl.h>
-#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include "fio.h"
#include "verify.h"
#include "parse.h"
-#include "lib/fls.h"
#include "lib/pattern.h"
#include "options.h"
#include "optgroup.h"
+#include "zbd.h"
char client_sockaddr_str[INET6_ADDRSTRLEN] = { 0 };
#define cb_data_to_td(data) container_of(data, struct thread_data, o)
-static struct pattern_fmt_desc fmt_desc[] = {
+static const struct pattern_fmt_desc fmt_desc[] = {
	{
		.fmt = "%o",
		.len = FIELD_SIZE(struct io_u *, offset),
		.paste = paste_blockoff
-	}
+	},
+	/*
+	 * Empty sentinel terminates the table — parse_and_fill_pattern()
+	 * no longer takes an explicit table-size argument.
+	 */
+	{ }
};
/*
struct split {
unsigned int nr;
- unsigned int val1[ZONESPLIT_MAX];
+ unsigned long long val1[ZONESPLIT_MAX];
unsigned long long val2[ZONESPLIT_MAX];
};
static int split_parse_ddir(struct thread_options *o, struct split *split,
- enum fio_ddir ddir, char *str, bool absolute,
- unsigned int max_splits)
+ char *str, bool absolute, unsigned int max_splits)
{
unsigned long long perc;
unsigned int i;
split->val1[i] = val;
split->val2[i] = perc;
i++;
- if (i == max_splits)
+ if (i == max_splits) {
+ log_err("fio: hit max of %d split entries\n", i);
break;
+ }
}
split->nr = i;
bool data)
{
unsigned int i, perc, perc_missing;
- unsigned int max_bs, min_bs;
+ unsigned long long max_bs, min_bs;
struct split split;
memset(&split, 0, sizeof(split));
- if (split_parse_ddir(o, &split, ddir, str, data, BSSPLIT_MAX))
+ if (split_parse_ddir(o, &split, str, data, BSSPLIT_MAX))
return 1;
if (!split.nr)
return 0;
}
+/*
+ * Option callback for replay_skip: parse a comma-separated list of IO
+ * types ("read", "write", "trim", "sync") and set the corresponding
+ * DDIR bits in td->o.replay_skip.  Returns 0 on success, 1 if an
+ * unknown token is seen (parsing stops at the first bad token).
+ */
+static int str_replay_skip_cb(void *data, const char *input)
+{
+	struct thread_data *td = cb_data_to_td(data);
+	char *str, *p, *n;
+	int ret = 0;
+
+	if (parse_dryrun())
+		return 0;
+
+	p = str = strdup(input);
+
+	strip_blank_front(&str);
+	strip_blank_end(str);
+
+	/*
+	 * Walk the blank-stripped cursor 'str', not 'p': 'p' keeps the
+	 * original strdup() pointer so it can be safely freed below.
+	 * (Freeing 'str' after strip_blank_front() advanced it would be
+	 * undefined behavior, and looping over the un-stripped 'p' made
+	 * the stripping ineffective.)
+	 */
+	while (str) {
+		n = strchr(str, ',');
+		if (n)
+			*n++ = '\0';
+		if (!strcmp(str, "read"))
+			td->o.replay_skip |= 1u << DDIR_READ;
+		else if (!strcmp(str, "write"))
+			td->o.replay_skip |= 1u << DDIR_WRITE;
+		else if (!strcmp(str, "trim"))
+			td->o.replay_skip |= 1u << DDIR_TRIM;
+		else if (!strcmp(str, "sync"))
+			td->o.replay_skip |= 1u << DDIR_SYNC;
+		else {
+			log_err("Unknown skip type: %s\n", str);
+			ret = 1;
+			break;
+		}
+		str = n;
+	}
+	free(p);
+	return ret;
+}
+
static int str_ignore_error_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
+/*
+ * Option callback for exitall: set the global flag that makes all jobs
+ * terminate when any one of them finishes.
+ */
static int str_exitall_cb(void)
{
-	exitall_on_terminate = 1;
+	exitall_on_terminate = true;
	return 0;
}
const long max_cpu = cpus_online();
cpus_in_mask = fio_cpu_count(mask);
+ if (!cpus_in_mask)
+ return 0;
+
cpu_index = cpu_index % cpus_in_mask;
index = 0;
memset(&split, 0, sizeof(split));
- if (split_parse_ddir(o, &split, ddir, str, absolute, ZONESPLIT_MAX))
+ if (split_parse_ddir(o, &split, str, absolute, ZONESPLIT_MAX))
return 1;
if (!split.nr)
return 0;
return 0;
}
-static void __td_zone_gen_index(struct thread_data *td, enum fio_ddir ddir)
-{
- unsigned int i, j, sprev, aprev;
- uint64_t sprev_sz;
-
- td->zone_state_index[ddir] = malloc(sizeof(struct zone_split_index) * 100);
-
- sprev_sz = sprev = aprev = 0;
- for (i = 0; i < td->o.zone_split_nr[ddir]; i++) {
- struct zone_split *zsp = &td->o.zone_split[ddir][i];
-
- for (j = aprev; j < aprev + zsp->access_perc; j++) {
- struct zone_split_index *zsi = &td->zone_state_index[ddir][j];
-
- zsi->size_perc = sprev + zsp->size_perc;
- zsi->size_perc_prev = sprev;
-
- zsi->size = sprev_sz + zsp->size;
- zsi->size_prev = sprev_sz;
- }
-
- aprev += zsp->access_perc;
- sprev += zsp->size_perc;
- sprev_sz += zsp->size;
- }
-}
-
-/*
- * Generate state table for indexes, so we don't have to do it inline from
- * the hot IO path
- */
-static void td_zone_gen_index(struct thread_data *td)
-{
- int i;
-
- td->zone_state_index = malloc(DDIR_RWDIR_CNT *
- sizeof(struct zone_split_index *));
-
- for (i = 0; i < DDIR_RWDIR_CNT; i++)
- __td_zone_gen_index(td, i);
-}
-
static int parse_zoned_distribution(struct thread_data *td, const char *input,
bool absolute)
{
}
if (parse_dryrun()) {
- int i;
-
for (i = 0; i < DDIR_RWDIR_CNT; i++) {
free(td->o.zone_split[i]);
td->o.zone_split[i] = NULL;
return ret;
}
- if (!ret)
- td_zone_gen_index(td);
- else {
+ if (ret) {
for (i = 0; i < DDIR_RWDIR_CNT; i++)
td->o.zone_split_nr[i] = 0;
}
* is escaped with a '\', then that ':' is part of the filename and does not
* indicate a new file.
*/
-static char *get_next_name(char **ptr)
+char *get_next_str(char **ptr)
{
char *str = *ptr;
char *p, *start;
}
-static int get_max_name_idx(char *input)
+int get_max_str_idx(char *input)
{
unsigned int cur_idx;
char *str, *p;
p = str = strdup(input);
for (cur_idx = 0; ; cur_idx++)
- if (get_next_name(&str) == NULL)
+ if (get_next_str(&str) == NULL)
break;
free(p);
p = str = strdup(input);
- index %= get_max_name_idx(input);
+ index %= get_max_str_idx(input);
for (cur_idx = 0; cur_idx <= index; cur_idx++)
- fname = get_next_name(&str);
+ fname = get_next_str(&str);
if (client_sockaddr_str[0] && unique_filename) {
len = snprintf(target, tlen, "%s/%s.", fname,
client_sockaddr_str);
} else
- len = snprintf(target, tlen, "%s/", fname);
+ len = snprintf(target, tlen, "%s%c", fname,
+ FIO_OS_PATH_SEPARATOR);
target[tlen - 1] = '\0';
free(p);
return len;
}
+/*
+ * Return a newly allocated copy of the index'th ':'-separated name in
+ * 'input', wrapping 'index' by the number of names present.  The caller
+ * owns the returned string and must free() it.
+ *
+ * NOTE(review): assumes 'input' contains at least one name — an empty
+ * list would make get_max_str_idx() return 0 and the '%=' divide by
+ * zero; a negative 'index' would leave 'fname' uninitialized.  Confirm
+ * callers guarantee a non-empty list and index >= 0.
+ */
+char* get_name_by_idx(char *input, int index)
+{
+	unsigned int cur_idx;
+	char *fname, *str, *p;
+
+	p = str = strdup(input);
+
+	index %= get_max_str_idx(input);
+	for (cur_idx = 0; cur_idx <= index; cur_idx++)
+		fname = get_next_str(&str);
+
+	/* duplicate before freeing the backing buffer 'p' */
+	fname = strdup(fname);
+	free(p);
+
+	return fname;
+}
+
static int str_filename_cb(void *data, const char *input)
{
struct thread_data *td = cb_data_to_td(data);
if (!td->files_index)
td->o.nr_files = 0;
- while ((fname = get_next_name(&str)) != NULL) {
+ while ((fname = get_next_str(&str)) != NULL) {
if (!strlen(fname))
break;
add_file(td, fname, 0, 1);
return 0;
p = str = strdup(td->o.directory);
- while ((dirname = get_next_name(&str)) != NULL) {
+ while ((dirname = get_next_str(&str)) != NULL) {
if (lstat(dirname, &sb) < 0) {
ret = errno;
/* FIXME: for now buffer pattern does not support formats */
ret = parse_and_fill_pattern(input, strlen(input), td->o.buffer_pattern,
- MAX_PATTERN_SIZE, NULL, 0, NULL, NULL);
+ MAX_PATTERN_SIZE, NULL, NULL, NULL);
if (ret < 0)
return 1;
td->o.verify_fmt_sz = ARRAY_SIZE(td->o.verify_fmt);
ret = parse_and_fill_pattern(input, strlen(input), td->o.verify_pattern,
- MAX_PATTERN_SIZE, fmt_desc, sizeof(fmt_desc),
+ MAX_PATTERN_SIZE, fmt_desc,
td->o.verify_fmt, &td->o.verify_fmt_sz);
if (ret < 0)
return 1;
struct thread_data *td = cb_data_to_td(data);
int val = *il;
- td->o.disable_lat = !!val;
- td->o.disable_clat = !!val;
- td->o.disable_slat = !!val;
- td->o.disable_bw = !!val;
- td->o.clat_percentiles = !val;
- if (val)
+ /*
+	 * Only modify options if gtod_reduce==1
+ * Otherwise leave settings alone.
+ */
+ if (val) {
+ td->o.disable_lat = 1;
+ td->o.disable_clat = 1;
+ td->o.disable_slat = 1;
+ td->o.disable_bw = 1;
+ td->o.clat_percentiles = 0;
+ td->o.lat_percentiles = 0;
+ td->o.slat_percentiles = 0;
td->ts_cache_mask = 63;
+ }
return 0;
}
return 0;
}
+/*
+ * Option callback for offset_increment: accept either an absolute byte
+ * value or a percentage.  The parser encodes percentages as -1ULL - v
+ * (see parse_is_percent()), so the same arithmetic decodes the percent,
+ * which is stored in offset_increment_percent with offset_increment
+ * zeroed; absolute values go straight into offset_increment.
+ */
+static int str_offset_increment_cb(void *data, unsigned long long *__val)
+{
+	struct thread_data *td = cb_data_to_td(data);
+	unsigned long long v = *__val;
+
+	if (parse_is_percent(v)) {
+		td->o.offset_increment = 0;
+		td->o.offset_increment_percent = -1ULL - v;
+		dprint(FD_PARSE, "SET offset_increment_percent %d\n",
+			td->o.offset_increment_percent);
+	} else
+		td->o.offset_increment = v;
+
+	return 0;
+}
+
static int str_size_cb(void *data, unsigned long long *__val)
{
struct thread_data *td = cb_data_to_td(data);
return 0;
}
-static int rw_verify(struct fio_option *o, void *data)
+/*
+ * Verify callback for the rw/readwrite option: reject jobs that would
+ * write or trim while fio runs in read-only mode (trim modifies the
+ * media just like a write, so it is refused too).
+ */
+static int rw_verify(const struct fio_option *o, void *data)
{
	struct thread_data *td = cb_data_to_td(data);
-	if (read_only && td_write(td)) {
-		log_err("fio: job <%s> has write bit set, but fio is in"
-			" read-only mode\n", td->o.name);
+	if (read_only && (td_write(td) || td_trim(td))) {
+		log_err("fio: job <%s> has write or trim bit set, but"
+			" fio is in read-only mode\n", td->o.name);
		return 1;
	}
	return 0;
}
-static int gtod_cpu_verify(struct fio_option *o, void *data)
+static int gtod_cpu_verify(const struct fio_option *o, void *data)
{
#ifndef FIO_HAVE_CPU_AFFINITY
struct thread_data *td = cb_data_to_td(data);
.help = "Linux native asynchronous IO",
},
#endif
+#ifdef ARCH_HAVE_IOURING
+ { .ival = "io_uring",
+ .help = "Fast Linux native aio",
+ },
+#endif
#ifdef CONFIG_POSIXAIO
{ .ival = "posixaio",
.help = "POSIX asynchronous IO",
.help = "GUASI IO engine",
},
#endif
-#ifdef FIO_HAVE_BINJECT
- { .ival = "binject",
- .help = "binject direct inject block engine",
- },
-#endif
#ifdef CONFIG_RDMA
{ .ival = "rdma",
.help = "RDMA IO engine",
},
#endif
-#ifdef CONFIG_FUSION_AW
- { .ival = "fusion-aw-sync",
- .help = "Fusion-io atomic write engine",
- },
-#endif
#ifdef CONFIG_LINUX_EXT4_MOVE_EXTENT
{ .ival = "e4defrag",
.help = "ext4 defrag engine",
#endif
#ifdef CONFIG_PMEMBLK
{ .ival = "pmemblk",
- .help = "NVML libpmemblk based IO engine",
+ .help = "PMDK libpmemblk based IO engine",
},
#endif
+#ifdef CONFIG_IME
+ { .ival = "ime_psync",
+ .help = "DDN's IME synchronous IO engine",
+ },
+ { .ival = "ime_psyncv",
+ .help = "DDN's IME synchronous IO engine using iovecs",
+ },
+ { .ival = "ime_aio",
+ .help = "DDN's IME asynchronous IO engine",
+ },
+#endif
#ifdef CONFIG_LINUX_DEVDAX
{ .ival = "dev-dax",
.help = "DAX Device based IO engine",
},
#ifdef CONFIG_LIBPMEM
{ .ival = "libpmem",
- .help = "NVML libpmem based IO engine",
+ .help = "PMDK libpmem based IO engine",
},
#endif
+#ifdef CONFIG_HTTP
+ { .ival = "http",
+ .help = "HTTP (WebDAV/S3) IO engine",
+ },
+#endif
+ { .ival = "nbd",
+ .help = "Network Block Device (NBD) IO engine"
+ },
},
},
{
.name = "offset_increment",
.lname = "IO offset increment",
.type = FIO_OPT_STR_VAL,
+ .cb = str_offset_increment_cb,
.off1 = offsetof(struct thread_options, offset_increment),
.help = "What is the increment from one offset to the next",
.parent = "offset",
.name = "bs",
.lname = "Block size",
.alias = "blocksize",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_ULL,
.off1 = offsetof(struct thread_options, bs[DDIR_READ]),
.off2 = offsetof(struct thread_options, bs[DDIR_WRITE]),
.off3 = offsetof(struct thread_options, bs[DDIR_TRIM]),
.name = "ba",
.lname = "Block size align",
.alias = "blockalign",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_ULL,
.off1 = offsetof(struct thread_options, ba[DDIR_READ]),
.off2 = offsetof(struct thread_options, ba[DDIR_WRITE]),
.off3 = offsetof(struct thread_options, ba[DDIR_TRIM]),
{
.name = "bssplit",
.lname = "Block size split",
- .type = FIO_OPT_STR,
+ .type = FIO_OPT_STR_ULL,
.cb = str_bssplit_cb,
.off1 = offsetof(struct thread_options, bssplit),
.help = "Set a specific mix of block sizes",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RANDOM,
},
- {
- .name = "use_os_rand",
- .lname = "Use OS random",
- .type = FIO_OPT_DEPRECATED,
- .off1 = offsetof(struct thread_options, dep_use_os_rand),
- .category = FIO_OPT_C_IO,
- .group = FIO_OPT_G_RANDOM,
- },
{
.name = "norandommap",
.lname = "No randommap",
.parent = "nrfiles",
.hide = 1,
},
-#ifdef FIO_HAVE_ANY_FALLOCATE
{
.name = "fallocate",
.lname = "Fallocate",
.type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, fallocate_mode),
.help = "Whether pre-allocation is performed when laying out files",
+#ifdef FIO_HAVE_DEFAULT_FALLOCATE
.def = "native",
+#else
+ .def = "none",
+#endif
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
.posval = {
.help = "Use fallocate(..., FALLOC_FL_KEEP_SIZE, ...)",
},
#endif
+ { .ival = "truncate",
+ .oval = FIO_FALLOCATE_TRUNCATE,
+ .help = "Truncate file to final size instead of allocating"
+ },
/* Compatibility with former boolean values */
{ .ival = "0",
.oval = FIO_FALLOCATE_NONE,
#endif
},
},
-#else /* FIO_HAVE_ANY_FALLOCATE */
- {
- .name = "fallocate",
- .lname = "Fallocate",
- .type = FIO_OPT_UNSUPPORTED,
- .help = "Your platform does not support fallocate",
- },
-#endif /* FIO_HAVE_ANY_FALLOCATE */
{
.name = "fadvise_hint",
.lname = "Fadvise hint",
.posval = {
{ .ival = "0",
.oval = F_ADV_NONE,
- .help = "Don't issue fadvise",
+ .help = "Don't issue fadvise/madvise",
},
{ .ival = "1",
.oval = F_ADV_TYPE,
{
.name = "verifysort",
.lname = "Verify sort",
- .type = FIO_OPT_BOOL,
- .off1 = offsetof(struct thread_options, verifysort),
- .help = "Sort written verify blocks for read back",
- .def = "1",
- .parent = "verify",
- .hide = 1,
+ .type = FIO_OPT_SOFT_DEPRECATED,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_VERIFY,
},
{
.name = "verifysort_nr",
.lname = "Verify Sort Nr",
- .type = FIO_OPT_INT,
- .off1 = offsetof(struct thread_options, verifysort_nr),
- .help = "Pre-load and sort verify blocks for a read workload",
- .minval = 0,
- .maxval = 131072,
- .def = "1024",
- .parent = "verify",
+ .type = FIO_OPT_SOFT_DEPRECATED,
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_VERIFY,
},
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IOLOG,
},
+ {
+ .name = "read_iolog_chunked",
+ .lname = "Read I/O log in parts",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, read_iolog_chunked),
+ .def = "0",
+ .parent = "read_iolog",
+ .help = "Parse IO pattern in chunks",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
+ },
{
.name = "replay_no_stall",
.lname = "Don't stall on replay",
.group = FIO_OPT_G_IOLOG,
.pow2 = 1,
},
+ {
+ .name = "replay_time_scale",
+ .lname = "Replay Time Scale",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct thread_options, replay_time_scale),
+ .def = "100",
+ .minval = 1,
+ .parent = "read_iolog",
+ .hide = 1,
+ .help = "Scale time for replay events",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
+ },
+	{
+		.name = "replay_skip",
+		.lname = "Replay Skip",
+		.type = FIO_OPT_STR,
+		.cb = str_replay_skip_cb,
+		.off1 = offsetof(struct thread_options, replay_skip),
+		.parent = "read_iolog",
+		/* token list must match what str_replay_skip_cb() accepts */
+		.help = "Skip certain IO types (read,write,trim,sync)",
+		.category = FIO_OPT_C_IO,
+		.group = FIO_OPT_G_IOLOG,
+	},
+ {
+ .name = "merge_blktrace_file",
+ .lname = "Merged blktrace output filename",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = offsetof(struct thread_options, merge_blktrace_file),
+ .help = "Merged blktrace output filename",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
+ },
+ {
+ .name = "merge_blktrace_scalars",
+ .lname = "Percentage to scale each trace",
+ .type = FIO_OPT_FLOAT_LIST,
+ .off1 = offsetof(struct thread_options, merge_blktrace_scalars),
+ .maxlen = FIO_IO_U_LIST_MAX_LEN,
+ .help = "Percentage to scale each trace",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
+ },
+ {
+ .name = "merge_blktrace_iters",
+ .lname = "Number of iterations to run per trace",
+ .type = FIO_OPT_FLOAT_LIST,
+ .off1 = offsetof(struct thread_options, merge_blktrace_iters),
+ .maxlen = FIO_IO_U_LIST_MAX_LEN,
+ .help = "Number of iterations to run per trace",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IOLOG,
+ },
{
.name = "exec_prerun",
.lname = "Pre-execute runnable",
.help = "Your platform does not support IO scheduler switching",
},
#endif
+ {
+ .name = "zonemode",
+ .lname = "Zone mode",
+ .help = "Mode for the zonesize, zonerange and zoneskip parameters",
+ .type = FIO_OPT_STR,
+ .off1 = offsetof(struct thread_options, zone_mode),
+ .def = "none",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
+ .posval = {
+ { .ival = "none",
+ .oval = ZONE_MODE_NONE,
+ .help = "no zoning",
+ },
+ { .ival = "strided",
+ .oval = ZONE_MODE_STRIDED,
+ .help = "strided mode - random I/O is restricted to a single zone",
+ },
+ { .ival = "zbd",
+ .oval = ZONE_MODE_ZBD,
+ .help = "zoned block device mode - random I/O selects one of multiple zones randomly",
+ },
+ },
+ },
{
.name = "zonesize",
.lname = "Zone size",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_ZONE,
},
+ {
+ .name = "zonecapacity",
+ .lname = "Zone capacity",
+ .type = FIO_OPT_STR_VAL,
+ .off1 = offsetof(struct thread_options, zone_capacity),
+ .help = "Capacity per zone",
+ .def = "0",
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
+ },
{
.name = "zonerange",
.lname = "Zone range",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_ZONE,
},
+ {
+ .name = "read_beyond_wp",
+ .lname = "Allow reads beyond the zone write pointer",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, read_beyond_wp),
+ .help = "Allow reads beyond the zone write pointer",
+ .def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "max_open_zones",
+ .lname = "Per device/file maximum number of open zones",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct thread_options, max_open_zones),
+ .maxval = ZBD_MAX_OPEN_ZONES,
+ .help = "Limit on the number of simultaneously opened sequential write zones with zonemode=zbd",
+ .def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "job_max_open_zones",
+ .lname = "Job maximum number of open zones",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct thread_options, job_max_open_zones),
+ .maxval = ZBD_MAX_OPEN_ZONES,
+ .help = "Limit on the number of simultaneously opened sequential write zones with zonemode=zbd by one thread/process",
+ .def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "zone_reset_threshold",
+ .lname = "Zone reset threshold",
+ .help = "Zoned block device reset threshold",
+ .type = FIO_OPT_FLOAT_LIST,
+ .maxlen = 1,
+ .off1 = offsetof(struct thread_options, zrt),
+ .minfp = 0,
+ .maxfp = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
+ },
+ {
+ .name = "zone_reset_frequency",
+ .lname = "Zone reset frequency",
+ .help = "Zoned block device zone reset frequency in HZ",
+ .type = FIO_OPT_FLOAT_LIST,
+ .maxlen = 1,
+ .off1 = offsetof(struct thread_options, zrf),
+ .minfp = 0,
+ .maxfp = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_ZONE,
+ },
{
.name = "lockmem",
.lname = "Lock memory",
{
.name = "rate",
.lname = "I/O rate",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_ULL,
.off1 = offsetof(struct thread_options, rate[DDIR_READ]),
.off2 = offsetof(struct thread_options, rate[DDIR_WRITE]),
.off3 = offsetof(struct thread_options, rate[DDIR_TRIM]),
.name = "rate_min",
.alias = "ratemin",
.lname = "I/O min rate",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_ULL,
.off1 = offsetof(struct thread_options, ratemin[DDIR_READ]),
.off2 = offsetof(struct thread_options, ratemin[DDIR_WRITE]),
.off3 = offsetof(struct thread_options, ratemin[DDIR_TRIM]),
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_RATE,
},
+ {
+ .name = "rate_ignore_thinktime",
+ .lname = "Rate ignore thinktime",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, rate_ign_think),
+ .help = "Rated IO ignores thinktime settings",
+ .parent = "rate",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_RATE,
+ },
{
.name = "max_latency",
.lname = "Max Latency (usec)",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_LATPROF,
},
+ {
+ .name = "latency_run",
+ .lname = "Latency Run",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, latency_run),
+ .help = "Keep adjusting queue depth to match latency_target",
+ .def = "0",
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_LATPROF,
+ },
{
.name = "invalidate",
.lname = "Cache invalidate",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_PROCESS,
},
+ {
+ .name = "exit_what",
+ .lname = "What jobs to quit on terminate",
+ .type = FIO_OPT_STR,
+ .off1 = offsetof(struct thread_options, exit_what),
+ .help = "Fine-grained control for exitall",
+ .def = "group",
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_PROCESS,
+ .posval = {
+ { .ival = "group",
+ .oval = TERMINATE_GROUP,
+ .help = "exit_all=1 default behaviour",
+ },
+ { .ival = "stonewall",
+ .oval = TERMINATE_STONEWALL,
+ .help = "quit all currently running jobs; continue with next stonewall",
+ },
+ { .ival = "all",
+ .oval = TERMINATE_ALL,
+ .help = "Quit everything",
+ },
+ },
+ },
{
.name = "exitall_on_error",
.lname = "Exit-all on terminate in error",
.off1 = offsetof(struct thread_options, clat_percentiles),
.help = "Enable the reporting of completion latency percentiles",
.def = "1",
- .inverse = "lat_percentiles",
.category = FIO_OPT_C_STAT,
.group = FIO_OPT_G_INVALID,
},
.off1 = offsetof(struct thread_options, lat_percentiles),
.help = "Enable the reporting of IO latency percentiles",
.def = "0",
- .inverse = "clat_percentiles",
+ .category = FIO_OPT_C_STAT,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "slat_percentiles",
+ .lname = "Submission latency percentiles",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct thread_options, slat_percentiles),
+ .help = "Enable the reporting of submission latency percentiles",
+ .def = "0",
.category = FIO_OPT_C_STAT,
.group = FIO_OPT_G_INVALID,
},
{
.name = "kb_base",
.lname = "KB Base",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, kb_base),
.prio = 1,
.def = "1024",
{
.name = "unit_base",
.lname = "Unit for quantities of data (Bits or Bytes)",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_STR,
.off1 = offsetof(struct thread_options, unit_base),
.prio = 1,
.posval = {
{ .ival = "0",
- .oval = 0,
+ .oval = N2S_NONE,
.help = "Auto-detect",
},
{ .ival = "8",
- .oval = 8,
+ .oval = N2S_BYTEPERSEC,
.help = "Normal (byte based)",
},
{ .ival = "1",
- .oval = 1,
+ .oval = N2S_BITPERSEC,
.help = "Bit based",
},
},
* substitution always occurs, even if VARNAME is empty or the corresponding
* environment variable undefined.
*/
-static char *option_dup_subs(const char *opt)
+char *fio_option_dup_subs(const char *opt)
{
char out[OPT_LEN_MAX+1];
char in[OPT_LEN_MAX+1];
return NULL;
}
- in[OPT_LEN_MAX] = '\0';
- strncpy(in, opt, OPT_LEN_MAX);
+ snprintf(in, sizeof(in), "%s", opt);
while (*inptr && nchr > 0) {
if (inptr[0] == '$' && inptr[1] == '{') {
int i;
char **opts_copy = malloc(num_opts * sizeof(*opts));
for (i = 0; i < num_opts; i++) {
- opts_copy[i] = option_dup_subs(opts[i]);
+ opts_copy[i] = fio_option_dup_subs(opts[i]);
if (!opts_copy[i])
continue;
opts_copy[i] = fio_keyword_replace(opts_copy[i]);
opts_copy = dup_and_sub_options(opts, num_opts);
for (ret = 0, i = 0, unknown = 0; i < num_opts; i++) {
- struct fio_option *o;
+ const struct fio_option *o;
int newret = parse_option(opts_copy[i], opts[i], fio_options,
&o, &td->o, &td->opt_list);
opts = opts_copy;
}
for (i = 0; i < num_opts; i++) {
- struct fio_option *o = NULL;
+ const struct fio_option *o = NULL;
int newret = 1;
if (!opts_copy[i])
ret = parse_cmd_option(opt, val, fio_options, &td->o, &td->opt_list);
if (!ret) {
- struct fio_option *o;
+ const struct fio_option *o;
- o = find_option(fio_options, opt);
+ o = find_option_c(fio_options, opt);
if (o)
fio_option_mark_set(&td->o, o);
}
return kb_base;
}
-int add_option(struct fio_option *o)
+int add_option(const struct fio_option *o)
{
struct fio_option *__o;
int opt_index = 0;
return find_option(fio_options, name);
}
-static struct fio_option *find_next_opt(struct thread_options *o,
- struct fio_option *from,
+static struct fio_option *find_next_opt(struct fio_option *from,
unsigned int off1)
{
struct fio_option *opt;
struct fio_option *opt, *next;
next = NULL;
- while ((opt = find_next_opt(o, next, off1)) != NULL) {
+ while ((opt = find_next_opt(next, off1)) != NULL) {
if (opt_is_set(o, opt))
return true;
return false;
}
-void fio_option_mark_set(struct thread_options *o, struct fio_option *opt)
+void fio_option_mark_set(struct thread_options *o, const struct fio_option *opt)
{
unsigned int opt_off, index, offset;