#include <string.h>
#include <getopt.h>
#include <assert.h>
+#include <libgen.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
#include "fio.h"
+#include "verify.h"
#include "parse.h"
+#include "lib/fls.h"
#define td_var_offset(var) ((size_t) &((struct thread_options *)0)->var)
return strdup(p);
}
+/*
+ * qsort() comparator for struct bssplit, ordering entries by descending
+ * percentage. Must return negative/zero/positive: the previous
+ * "a < b" form only ever returned 0 or 1, which breaks the total-order
+ * contract qsort() relies on.
+ */
+static int bs_cmp(const void *p1, const void *p2)
+{
+	const struct bssplit *bsp1 = p1;
+	const struct bssplit *bsp2 = p2;
+
+	/* higher percentage sorts first */
+	return (bsp2->perc > bsp1->perc) - (bsp2->perc < bsp1->perc);
+}
+
+/*
+ * Parse a "bs/perc:bs/perc:..." list for one data direction and store the
+ * result in td->o.bssplit[ddir]. Entries without an explicit "/perc"
+ * share the remaining percentage. Also records min/max block size seen.
+ * Returns 0 on success, 1 on parse or allocation failure (nothing is
+ * stored then, and the work buffer is released).
+ */
+static int bssplit_ddir(struct thread_data *td, int ddir, char *str)
+{
+	struct bssplit *bssplit;
+	unsigned int i, perc, perc_missing;
+	unsigned int max_bs, min_bs;
+	long long val;
+	char *fname;
+
+	/* start with room for 4 entries, grown below if needed */
+	td->o.bssplit_nr[ddir] = 4;
+	bssplit = malloc(4 * sizeof(struct bssplit));
+	if (!bssplit) {
+		log_err("fio: bssplit out of memory\n");
+		return 1;
+	}
+
+	i = 0;
+	max_bs = 0;
+	min_bs = -1;	/* wraps to UINT_MAX, so any real bs is smaller */
+	while ((fname = strsep(&str, ":")) != NULL) {
+		char *perc_str;
+
+		if (!strlen(fname))
+			break;
+
+		/*
+		 * grow struct buffer, if needed
+		 */
+		if (i == td->o.bssplit_nr[ddir]) {
+			void *tmp;
+
+			td->o.bssplit_nr[ddir] <<= 1;
+			tmp = realloc(bssplit, td->o.bssplit_nr[ddir]
+					* sizeof(struct bssplit));
+			if (!tmp) {
+				log_err("fio: bssplit out of memory\n");
+				free(bssplit);
+				return 1;
+			}
+			bssplit = tmp;
+		}
+
+		/* optional "/perc" postfix: clamp to 100, 0 means "unset" */
+		perc_str = strstr(fname, "/");
+		if (perc_str) {
+			*perc_str = '\0';
+			perc_str++;
+			perc = atoi(perc_str);
+			if (perc > 100)
+				perc = 100;
+			else if (!perc)
+				perc = -1;
+		} else
+			perc = -1;
+
+		if (str_to_decimal(fname, &val, 1)) {
+			log_err("fio: bssplit conversion failed\n");
+			/*
+			 * Free the buffer being built here, NOT
+			 * td->o.bssplit: that member was never assigned
+			 * and freeing it was both a leak of 'bssplit' and
+			 * a free of the wrong object.
+			 */
+			free(bssplit);
+			return 1;
+		}
+
+		if (val > max_bs)
+			max_bs = val;
+		if (val < min_bs)
+			min_bs = val;
+
+		bssplit[i].bs = val;
+		bssplit[i].perc = perc;
+		i++;
+	}
+
+	td->o.bssplit_nr[ddir] = i;
+
+	/*
+	 * Now check if the percentages add up, and how much is missing
+	 */
+	perc = perc_missing = 0;
+	for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+		struct bssplit *bsp = &bssplit[i];
+
+		if (bsp->perc == (unsigned char) -1)
+			perc_missing++;
+		else
+			perc += bsp->perc;
+	}
+
+	if (perc > 100) {
+		log_err("fio: bssplit percentages add to more than 100%%\n");
+		free(bssplit);
+		return 1;
+	}
+	/*
+	 * If values didn't have a percentage set, divide the remains between
+	 * them.
+	 */
+	if (perc_missing) {
+		for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+			struct bssplit *bsp = &bssplit[i];
+
+			if (bsp->perc == (unsigned char) -1)
+				bsp->perc = (100 - perc) / perc_missing;
+		}
+	}
+
+	td->o.min_bs[ddir] = min_bs;
+	td->o.max_bs[ddir] = max_bs;
+
+	/*
+	 * now sort based on percentages, for ease of lookup
+	 */
+	qsort(bssplit, td->o.bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
+	td->o.bssplit[ddir] = bssplit;
+	return 0;
+}
+
+/*
+ * Option callback for "bssplit". Input is either a single split spec
+ * (applied to both read and write) or "read-spec,write-spec". The
+ * per-direction parsing is delegated to bssplit_ddir(), which consumes
+ * its string argument via strsep() — hence the defensive copies below.
+ * Returns 0 on success, non-zero on parse failure.
+ */
+static int str_bssplit_cb(void *data, const char *input)
+{
+	struct thread_data *td = data;
+	char *str, *p, *odir;
+	int ret = 0;
+
+	p = str = strdup(input);
+
+	strip_blank_front(&str);
+	strip_blank_end(str);
+
+	/* a comma separates the read spec from the write spec */
+	odir = strchr(str, ',');
+	if (odir) {
+		ret = bssplit_ddir(td, DDIR_WRITE, odir + 1);
+		if (!ret) {
+			/* terminate the read spec where the comma was */
+			*odir = '\0';
+			ret = bssplit_ddir(td, DDIR_READ, str);
+		}
+	} else {
+		char *op;
+
+		/* bssplit_ddir() mutates its argument, so parse a copy */
+		op = strdup(str);
+
+		ret = bssplit_ddir(td, DDIR_READ, str);
+		if (!ret)
+			ret = bssplit_ddir(td, DDIR_WRITE, op);
+
+		free(op);
+	}
+
+	free(p);
+	return ret;
+}
+
static int str_rw_cb(void *data, const char *str)
{
	struct thread_data *td = data;
	char *nr = get_opt_postfix(str);
	td->o.ddir_nr = 1;
-	if (nr)
+	if (nr) {
		td->o.ddir_nr = atoi(nr);
+		/* postfix appears to be strdup()'ed by get_opt_postfix()
+		 * (see strdup return above) — release it here */
+		free(nr);
+	}
	return 0;
}
return 0;
}
+/*
+ * Option callback for "rwmixread": store the read percentage and derive
+ * the write percentage as its complement of 100.
+ */
+static int str_rwmix_read_cb(void *data, unsigned int *val)
+{
+	struct thread_data *td = data;
+	unsigned int reads = *val;
+
+	td->o.rwmix[DDIR_READ] = reads;
+	td->o.rwmix[DDIR_WRITE] = 100 - reads;
+	return 0;
+}
+
+/*
+ * Option callback for "rwmixwrite": store the write percentage and derive
+ * the read percentage as its complement of 100.
+ */
+static int str_rwmix_write_cb(void *data, unsigned int *val)
+{
+	struct thread_data *td = data;
+	unsigned int writes = *val;
+
+	td->o.rwmix[DDIR_WRITE] = writes;
+	td->o.rwmix[DDIR_READ] = 100 - writes;
+	return 0;
+}
+
#ifdef FIO_HAVE_IOPRIO
static int str_prioclass_cb(void *data, unsigned int *val)
{
td->ioprio &= mask;
td->ioprio |= *val << IOPRIO_CLASS_SHIFT;
+ td->ioprio_set = 1;
return 0;
}
if ((td->ioprio >> IOPRIO_CLASS_SHIFT) == 0)
td->ioprio |= IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT;
+ td->ioprio_set = 1;
return 0;
}
#endif
{
struct thread_data *td = data;
unsigned int i;
+ long max_cpu;
+ int ret;
- CPU_ZERO(&td->o.cpumask);
+ ret = fio_cpuset_init(&td->o.cpumask);
+ if (ret < 0) {
+ log_err("fio: cpuset_init failed\n");
+ td_verror(td, ret, "fio_cpuset_init");
+ return 1;
+ }
- for (i = 0; i < sizeof(int) * 8; i++)
- if ((1 << i) & *val)
- CPU_SET(*val, &td->o.cpumask);
+ max_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+
+ for (i = 0; i < sizeof(int) * 8; i++) {
+ if ((1 << i) & *val) {
+ if (i > max_cpu) {
+ log_err("fio: CPU %d too large (max=%ld)\n", i,
+ max_cpu);
+ return 1;
+ }
+ dprint(FD_PARSE, "set cpu allowed %d\n", i);
+ fio_cpu_set(&td->o.cpumask, i);
+ }
+ }
td->o.cpumask_set = 1;
return 0;
{
struct thread_data *td = data;
char *cpu, *str, *p;
+ long max_cpu;
+ int ret = 0;
- CPU_ZERO(&td->o.cpumask);
+ ret = fio_cpuset_init(&td->o.cpumask);
+ if (ret < 0) {
+ log_err("fio: cpuset_init failed\n");
+ td_verror(td, ret, "fio_cpuset_init");
+ return 1;
+ }
p = str = strdup(input);
strip_blank_front(&str);
strip_blank_end(str);
+ max_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+
while ((cpu = strsep(&str, ",")) != NULL) {
+ char *str2, *cpu2;
+ int icpu, icpu2;
+
if (!strlen(cpu))
break;
- CPU_SET(atoi(cpu), &td->o.cpumask);
+
+ str2 = cpu;
+ icpu2 = -1;
+ while ((cpu2 = strsep(&str2, "-")) != NULL) {
+ if (!strlen(cpu2))
+ break;
+
+ icpu2 = atoi(cpu2);
+ }
+
+ icpu = atoi(cpu);
+ if (icpu2 == -1)
+ icpu2 = icpu;
+ while (icpu <= icpu2) {
+ if (icpu >= FIO_MAX_CPUS) {
+ log_err("fio: your OS only supports up to"
+ " %d CPUs\n", (int) FIO_MAX_CPUS);
+ ret = 1;
+ break;
+ }
+ if (icpu > max_cpu) {
+ log_err("fio: CPU %d too large (max=%ld)\n",
+ icpu, max_cpu);
+ ret = 1;
+ break;
+ }
+
+ dprint(FD_PARSE, "set cpu allowed %d\n", icpu);
+ fio_cpu_set(&td->o.cpumask, icpu);
+ icpu++;
+ }
+ if (ret)
+ break;
}
free(p);
- td->o.cpumask_set = 1;
- exit(0);
- return 0;
+ if (!ret)
+ td->o.cpumask_set = 1;
+ return ret;
}
#endif
char *nr = get_opt_postfix(str);
td->file_service_nr = 1;
- if (nr)
+ if (nr) {
td->file_service_nr = atoi(nr);
+ free(nr);
+ }
+
+ return 0;
+}
+
+/*
+ * Build the full path of 'fname' (prefixed with td->o.directory, if set)
+ * so a directory sanity check can be run on it; the actual lstat() check
+ * is compiled out below because FIO_DISKLESSIO engines aren't loaded yet
+ * at this point. Returns 0 on success, 1 if the combined path would not
+ * fit in PATH_MAX (the old strcpy/strcat/sprintf sequence could overflow
+ * file[] with long names).
+ */
+static int check_dir(struct thread_data *td, char *fname)
+{
+	char file[PATH_MAX], *dir;
+	int elen = 0;
+
+	if (td->o.directory) {
+		/* bounded copy: a huge directory name must not smash file[] */
+		elen = snprintf(file, sizeof(file), "%s/", td->o.directory);
+		if (elen < 0 || elen >= (int) sizeof(file)) {
+			log_err("fio: directory name too long\n");
+			return 1;
+		}
+	}
+
+	if (snprintf(file + elen, sizeof(file) - elen, "%s", fname) >=
+	    (int) (sizeof(file) - elen)) {
+		log_err("fio: file name too long\n");
+		return 1;
+	}
+	dir = dirname(file);
+
+#if 0
+	{
+	struct stat sb;
+	/*
+	 * We can't do this on FIO_DISKLESSIO engines. The engine isn't loaded
+	 * yet, so we can't do this check right here...
+	 */
+	if (lstat(dir, &sb) < 0) {
+		int ret = errno;
+
+		log_err("fio: %s is not a directory\n", dir);
+		td_verror(td, ret, "lstat");
+		return 1;
+	}
+
+	if (!S_ISDIR(sb.st_mode)) {
+		log_err("fio: %s is not a directory\n", dir);
+		return 1;
+	}
+	}
+#endif
+	return 0;
+}
while ((fname = strsep(&str, ":")) != NULL) {
if (!strlen(fname))
break;
+ if (check_dir(td, fname)) {
+ free(p);
+ return 1;
+ }
add_file(td, fname);
td->o.nr_files++;
}
struct stat sb;
if (lstat(td->o.directory, &sb) < 0) {
+ int ret = errno;
+
log_err("fio: %s is not a directory\n", td->o.directory);
- td_verror(td, errno, "lstat");
+ td_verror(td, ret, "lstat");
return 1;
}
if (!S_ISDIR(sb.st_mode)) {
return add_dir_files(td, td->o.opendir);
}
+/*
+ * Option callback for "verify_offset": a non-zero offset must leave room
+ * for the verify header stored at the front of each verified block.
+ */
+static int str_verify_offset_cb(void *data, unsigned int *off)
+{
+	struct thread_data *td = data;
+	unsigned int offset = *off;
+
+	if (offset != 0 && offset < sizeof(struct verify_header)) {
+		log_err("fio: verify_offset too small\n");
+		return 1;
+	}
+
+	td->o.verify_offset = offset;
+	return 0;
+}
+
+/*
+ * Option callback for "verify_pattern": record the pattern word and how
+ * many of its bytes are significant, based on the most significant set
+ * bit. NOTE(review): __fls() is typically undefined for an input of 0
+ * (kernel-style semantics) — guard that case; a pattern of 0 fits in a
+ * single byte.
+ */
+static int str_verify_pattern_cb(void *data, unsigned int *off)
+{
+	struct thread_data *td = data;
+	unsigned int msb;
+
+	msb = *off ? __fls(*off) : 0;
+	if (msb <= 8)
+		td->o.verify_pattern_bytes = 1;
+	else if (msb <= 16)
+		td->o.verify_pattern_bytes = 2;
+	else if (msb <= 24)
+		td->o.verify_pattern_bytes = 3;
+	else
+		td->o.verify_pattern_bytes = 4;
+
+	td->o.verify_pattern = *off;
+	return 0;
+}
+
+/*
+ * Option callback for "lockfile": an optional ":N" postfix sets the
+ * batch count, which otherwise defaults to 1.
+ */
+static int str_lockfile_cb(void *data, const char *str)
+{
+	struct thread_data *td = data;
+	char *postfix = get_opt_postfix(str);
+
+	td->o.lockfile_batch = 1;
+	if (postfix != NULL) {
+		td->o.lockfile_batch = atoi(postfix);
+		free(postfix);
+	}
+
+	return 0;
+}
+
+/*
+ * Option callback for "write_bw_log": enable bandwidth logging, with an
+ * optional filename argument for the log.
+ */
+static int str_write_bw_log_cb(void *data, const char *str)
+{
+	struct thread_data *td = data;
+
+	td->o.write_bw_log = 1;
+	if (str != NULL)
+		td->o.bw_log_file = strdup(str);
+
+	return 0;
+}
+
+/*
+ * Option callback for "write_lat_log": enable latency logging, with an
+ * optional filename argument for the log.
+ */
+static int str_write_lat_log_cb(void *data, const char *str)
+{
+	struct thread_data *td = data;
+
+	td->o.write_lat_log = 1;
+	if (str != NULL)
+		td->o.lat_log_file = strdup(str);
+
+	return 0;
+}
+
+/*
+ * Option callback for "gtod_reduce": toggles the clat/slat/bw logging
+ * knobs as a group, and widens the gettimeofday() cache window when on.
+ */
+static int str_gtod_reduce_cb(void *data, int *il)
+{
+	struct thread_data *td = data;
+	int enable = (*il != 0);
+
+	td->o.disable_clat = enable;
+	td->o.disable_slat = enable;
+	td->o.disable_bw = enable;
+	if (enable)
+		td->tv_cache_mask = 63;
+
+	return 0;
+}
+
+/*
+ * Option callback for "gtod_cpu": record the CPU to dedicate to
+ * gettimeofday() and flag offloading as enabled.
+ */
+static int str_gtod_cpu_cb(void *data, int *il)
+{
+	struct thread_data *td = data;
+
+	td->o.gtod_cpu = *il;
+	td->o.gtod_offload = 1;
+	return 0;
+}
+
+/*
+ * Option verifier for "rw": reject jobs with the write bit set when fio
+ * as a whole is running in read-only mode.
+ */
+static int rw_verify(struct fio_option *o, void *data)
+{
+	struct thread_data *td = data;
+
+	if (!read_only || !td_write(td))
+		return 0;
+
+	log_err("fio: job <%s> has write bit set, but fio is in"
+		" read-only mode\n", td->o.name);
+	return 1;
+}
+
+/*
+ * Option verifier for "gtod_cpu": offloading gettimeofday() to a
+ * dedicated CPU requires CPU affinity support on this platform.
+ * Fix: the two adjacent string literals previously concatenated to
+ * "forgettimeofday()" — a space was missing.
+ */
+static int gtod_cpu_verify(struct fio_option *o, void *data)
+{
+#ifndef FIO_HAVE_CPU_AFFINITY
+	struct thread_data *td = data;
+
+	if (td->o.gtod_cpu) {
+		log_err("fio: platform must support CPU affinity for"
+			" gettimeofday() offloading\n");
+		return 1;
+	}
+#endif
+
+	return 0;
+}
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(filename),
.cb = str_filename_cb,
+ .prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
},
+ {
+ .name = "lockfile",
+ .type = FIO_OPT_STR,
+ .cb = str_lockfile_cb,
+ .off1 = td_var_offset(file_lock_mode),
+ .help = "Lock file when doing IO to it",
+ .parent = "filename",
+ .def = "none",
+ .posval = {
+ { .ival = "none",
+ .oval = FILE_LOCK_NONE,
+ .help = "No file locking",
+ },
+ { .ival = "exclusive",
+ .oval = FILE_LOCK_EXCLUSIVE,
+ .help = "Exclusive file lock",
+ },
+ {
+ .ival = "readwrite",
+ .oval = FILE_LOCK_READWRITE,
+ .help = "Read vs write lock",
+ },
+ },
+ },
{
.name = "opendir",
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(td_ddir),
.help = "IO direction",
.def = "read",
+ .verify = rw_verify,
.posval = {
{ .ival = "read",
.oval = TD_DDIR_READ,
},
},
},
- {
- .name = "fadvise_hint",
- .type = FIO_OPT_BOOL,
- .off1 = td_var_offset(fadvise_hint),
- .help = "Use fadvise() to advise the kernel on IO pattern",
- .def = "1",
- },
{
.name = "ioengine",
.type = FIO_OPT_STR_STORE,
{ .ival = "sync",
.help = "Use read/write",
},
+ { .ival = "psync",
+ .help = "Use pread/pwrite",
+ },
+ { .ival = "vsync",
+ .help = "Use readv/writev",
+ },
#ifdef FIO_HAVE_LIBAIO
{ .ival = "libaio",
.help = "Linux native asynchronous IO",
{ .ival = "posixaio",
.help = "POSIX asynchronous IO",
},
+#endif
+#ifdef FIO_HAVE_SOLARISAIO
+ { .ival = "solarisaio",
+ .help = "Solaris native asynchronous IO",
+ },
#endif
{ .ival = "mmap",
.help = "Memory mapped IO",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth),
.help = "Amount of IO buffers to keep in flight",
+ .minval = 1,
.def = "1",
},
{
.name = "iodepth_batch",
+ .alias = "iodepth_batch_submit",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch),
- .help = "Number of IO to submit in one go",
+ .help = "Number of IO buffers to submit in one go",
+ .parent = "iodepth",
+ .minval = 1,
+ .def = "1",
+ },
+ {
+ .name = "iodepth_batch_complete",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(iodepth_batch_complete),
+ .help = "Number of IO buffers to retrieve in one go",
+ .parent = "iodepth",
+ .minval = 0,
+ .def = "1",
},
{
.name = "iodepth_low",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_low),
.help = "Low water mark for queuing depth",
+ .parent = "iodepth",
},
{
.name = "size",
.minval = 1,
.help = "Total size of device or files",
},
+ {
+ .name = "fill_device",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(fill_device),
+ .help = "Write until an ENOSPC error occurs",
+ .def = "0",
+ },
{
.name = "filesize",
.type = FIO_OPT_STR_VAL,
.minval = 1,
.help = "Size of individual files",
},
+ {
+ .name = "offset",
+ .alias = "fileoffset",
+ .type = FIO_OPT_STR_VAL,
+ .off1 = td_var_offset(start_offset),
+ .help = "Start IO from this offset",
+ .def = "0",
+ },
{
.name = "bs",
.alias = "blocksize",
- .type = FIO_OPT_STR_VAL_INT,
+ .type = FIO_OPT_INT,
.off1 = td_var_offset(bs[DDIR_READ]),
.off2 = td_var_offset(bs[DDIR_WRITE]),
.minval = 1,
.help = "Block size unit",
.def = "4k",
+ .parent = "rw",
+ },
+ {
+ .name = "ba",
+ .alias = "blockalign",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(ba[DDIR_READ]),
+ .off2 = td_var_offset(ba[DDIR_WRITE]),
+ .minval = 1,
+ .help = "IO block offset alignment",
+ .parent = "rw",
},
{
.name = "bsrange",
.off4 = td_var_offset(max_bs[DDIR_WRITE]),
.minval = 1,
.help = "Set block size range (in more detail than bs)",
+ .parent = "rw",
+ },
+ {
+ .name = "bssplit",
+ .type = FIO_OPT_STR,
+ .cb = str_bssplit_cb,
+ .help = "Set a specific mix of block sizes",
+ .parent = "rw",
},
{
.name = "bs_unaligned",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(bs_unaligned),
.help = "Don't sector align IO buffer sizes",
- },
- {
- .name = "offset",
- .type = FIO_OPT_STR_VAL,
- .off1 = td_var_offset(start_offset),
- .help = "Start IO from this offset",
- .def = "0",
+ .parent = "rw",
},
{
.name = "randrepeat",
.off1 = td_var_offset(rand_repeatable),
.help = "Use repeatable random IO pattern",
.def = "1",
+ .parent = "rw",
},
{
.name = "norandommap",
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(norandommap),
.help = "Accept potential duplicate random blocks",
+ .parent = "rw",
+ },
+ {
+ .name = "softrandommap",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(softrandommap),
+ .help = "Set norandommap if randommap allocation fails",
+ .parent = "norandommap",
+ .def = "0",
},
{
.name = "nrfiles",
.oval = FIO_FSERVICE_RR,
.help = "Round robin select files",
},
+ { .ival = "sequential",
+ .oval = FIO_FSERVICE_SEQ,
+ .help = "Finish one file before moving to the next",
+ },
},
+ .parent = "nrfiles",
+ },
+ {
+ .name = "fadvise_hint",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(fadvise_hint),
+ .help = "Use fadvise() to advise the kernel on IO pattern",
+ .def = "1",
},
{
.name = "fsync",
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
},
+ {
+ .name = "fdatasync",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(fdatasync_blocks),
+ .help = "Issue fdatasync for writes every given number of blocks",
+ .def = "0",
+ },
{
.name = "direct",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(time_based),
.help = "Keep running until runtime/timeout is met",
},
+ {
+ .name = "ramp_time",
+ .type = FIO_OPT_STR_VAL_TIME,
+ .off1 = td_var_offset(ramp_time),
+ .help = "Ramp up time before measuring performance",
+ },
{
.name = "mem",
.alias = "iomem",
#endif
},
},
+ {
+ .name = "iomem_align",
+ .alias = "mem_align",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(mem_align),
+ .minval = 0,
+ .help = "IO memory buffer offset alignment",
+ .def = "0",
+ .parent = "iomem",
+ },
{
.name = "verify",
.type = FIO_OPT_STR,
.oval = VERIFY_NONE,
.help = "Don't do IO verification",
},
+ { .ival = "md5",
+ .oval = VERIFY_MD5,
+ .help = "Use md5 checksums for verification",
+ },
+ { .ival = "crc64",
+ .oval = VERIFY_CRC64,
+ .help = "Use crc64 checksums for verification",
+ },
{ .ival = "crc32",
.oval = VERIFY_CRC32,
.help = "Use crc32 checksums for verification",
},
+ { .ival = "crc32c-intel",
+ .oval = VERIFY_CRC32C_INTEL,
+ .help = "Use hw crc32c checksums for verification",
+ },
+ { .ival = "crc32c",
+ .oval = VERIFY_CRC32C,
+ .help = "Use crc32c checksums for verification",
+ },
{ .ival = "crc16",
.oval = VERIFY_CRC16,
.help = "Use crc16 checksums for verification",
},
- { .ival = "md5",
- .oval = VERIFY_MD5,
- .help = "Use md5 checksums for verification",
+ { .ival = "crc7",
+ .oval = VERIFY_CRC7,
+ .help = "Use crc7 checksums for verification",
+ },
+ { .ival = "sha256",
+ .oval = VERIFY_SHA256,
+ .help = "Use sha256 checksums for verification",
+ },
+ { .ival = "sha512",
+ .oval = VERIFY_SHA512,
+ .help = "Use sha512 checksums for verification",
+ },
+ { .ival = "meta",
+ .oval = VERIFY_META,
+ .help = "Use io information",
},
{
.ival = "null",
},
},
},
+ {
+ .name = "do_verify",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(do_verify),
+ .help = "Run verification stage after write",
+ .def = "1",
+ .parent = "verify",
+ },
{
.name = "verifysort",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(verifysort),
.help = "Sort written verify blocks for read back",
.def = "1",
+ .parent = "verify",
+ },
+ {
+ .name = "verify_interval",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(verify_interval),
+ .minval = 2 * sizeof(struct verify_header),
+ .help = "Store verify buffer header every N bytes",
+ .parent = "verify",
+ },
+ {
+ .name = "verify_offset",
+ .type = FIO_OPT_INT,
+ .help = "Offset verify header location by N bytes",
+ .def = "0",
+ .cb = str_verify_offset_cb,
+ .parent = "verify",
+ },
+ {
+ .name = "verify_pattern",
+ .type = FIO_OPT_INT,
+ .cb = str_verify_pattern_cb,
+ .help = "Fill pattern for IO buffers",
+ .parent = "verify",
+ },
+ {
+ .name = "verify_fatal",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(verify_fatal),
+ .def = "0",
+ .help = "Exit on a single verify failure, don't continue",
+ .parent = "verify",
},
{
.name = "write_iolog",
.help = "Lock down this amount of memory",
.def = "0",
},
- {
- .name = "rwmixcycle",
- .type = FIO_OPT_INT,
- .off1 = td_var_offset(rwmixcycle),
- .help = "Cycle period for mixed read/write workloads (msec)",
- .def = "500",
- },
{
.name = "rwmixread",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rwmix[DDIR_READ]),
+ .cb = str_rwmix_read_cb,
.maxval = 100,
.help = "Percentage of mixed workload that is reads",
.def = "50",
{
.name = "rwmixwrite",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rwmix[DDIR_WRITE]),
+ .cb = str_rwmix_write_cb,
.maxval = 100,
.help = "Percentage of mixed workload that is writes",
.def = "50",
},
+ {
+ .name = "rwmixcycle",
+ .type = FIO_OPT_DEPRECATED,
+ },
{
.name = "nice",
.type = FIO_OPT_INT,
.off1 = td_var_offset(thinktime_spin),
.help = "Start think time by spinning this amount (usec)",
.def = "0",
+ .parent = "thinktime",
},
{
.name = "thinktime_blocks",
.off1 = td_var_offset(thinktime_blocks),
.help = "IO buffer period between 'thinktime'",
.def = "1",
+ .parent = "thinktime",
},
{
.name = "rate",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate),
+ .off1 = td_var_offset(rate[0]),
+ .off2 = td_var_offset(rate[1]),
.help = "Set bandwidth rate",
},
{
.name = "ratemin",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(ratemin),
+ .off1 = td_var_offset(ratemin[0]),
+ .off2 = td_var_offset(ratemin[1]),
.help = "Job must meet this rate or it will be shutdown",
+ .parent = "rate",
},
{
.name = "rate_iops",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate_iops),
+ .off1 = td_var_offset(rate_iops[0]),
+ .off2 = td_var_offset(rate_iops[1]),
.help = "Limit IO used to this number of IO operations/sec",
},
{
.name = "rate_iops_min",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate_iops_min),
+ .off1 = td_var_offset(rate_iops_min[0]),
+ .off2 = td_var_offset(rate_iops_min[1]),
.help = "Job must meet this rate or it will be shutdown",
+ .parent = "rate_iops",
},
{
.name = "ratecycle",
.off1 = td_var_offset(ratecycle),
.help = "Window average for rate limits (msec)",
.def = "1000",
+ .parent = "rate",
},
{
.name = "invalidate",
.off1 = td_var_offset(sync_io),
.help = "Use O_SYNC for buffered writes",
.def = "0",
+ .parent = "buffered",
},
{
.name = "bwavgtime",
.type = FIO_OPT_INT,
.off1 = td_var_offset(bw_avg_time),
- .help = "Time window over which to calculate bandwidth (msec)",
+ .help = "Time window over which to calculate bandwidth"
+ " (msec)",
.def = "500",
},
{
.help = "Fsync file after creation",
.def = "1",
},
+ {
+ .name = "create_on_open",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(create_on_open),
+ .help = "Create files when they are opened for IO",
+ .def = "0",
+ },
+ {
+ .name = "pre_read",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(pre_read),
+ .help = "Preread files before starting official testing",
+ .def = "0",
+ },
{
.name = "cpuload",
.type = FIO_OPT_INT,
.off1 = td_var_offset(cpucycle),
.help = "Length of the CPU burn cycles (usecs)",
.def = "50000",
+ .parent = "cpuload",
},
#ifdef FIO_HAVE_CPU_AFFINITY
{
},
{
.name = "write_bw_log",
- .type = FIO_OPT_STR_SET,
+ .type = FIO_OPT_STR,
.off1 = td_var_offset(write_bw_log),
+ .cb = str_write_bw_log_cb,
.help = "Write log of bandwidth during run",
},
{
.name = "write_lat_log",
- .type = FIO_OPT_STR_SET,
+ .type = FIO_OPT_STR,
.off1 = td_var_offset(write_lat_log),
+ .cb = str_write_lat_log_cb,
.help = "Write log of latency during run",
},
{
.name = "hugepage-size",
- .type = FIO_OPT_STR_VAL,
+ .type = FIO_OPT_INT,
.off1 = td_var_offset(hugepage_size),
.help = "When using hugepages, specify size of each page",
.def = __stringify(FIO_HUGE_PAGE),
.off1 = td_var_offset(zero_buffers),
.help = "Init IO buffers to all zeroes",
},
+ {
+ .name = "refill_buffers",
+ .type = FIO_OPT_STR_SET,
+ .off1 = td_var_offset(refill_buffers),
+ .help = "Refill IO buffers on every IO submit",
+ },
#ifdef FIO_HAVE_DISK_UTIL
{
.name = "disk_util",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(do_disk_util),
- .help = "Log disk utilization stats",
+ .help = "Log disk utilization statistics",
.def = "1",
},
#endif
+ {
+ .name = "gtod_reduce",
+ .type = FIO_OPT_BOOL,
+ .help = "Greatly reduce number of gettimeofday() calls",
+ .cb = str_gtod_reduce_cb,
+ .def = "0",
+ },
+ {
+ .name = "disable_clat",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(disable_clat),
+ .help = "Disable completion latency numbers",
+ .parent = "gtod_reduce",
+ .def = "0",
+ },
+ {
+ .name = "disable_slat",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(disable_slat),
+ .help = "Disable submissionn latency numbers",
+ .parent = "gtod_reduce",
+ .def = "0",
+ },
+ {
+ .name = "disable_bw_measurement",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(disable_bw),
+ .help = "Disable bandwidth logging",
+ .parent = "gtod_reduce",
+ .def = "0",
+ },
+ {
+ .name = "gtod_cpu",
+ .type = FIO_OPT_INT,
+ .cb = str_gtod_cpu_cb,
+ .help = "Setup dedicated gettimeofday() thread on this CPU",
+ .verify = gtod_cpu_verify,
+ },
+ {
+ .name = "continue_on_error",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(continue_on_error),
+ .help = "Continue on non-fatal errors during I/O",
+ .def = "0",
+ },
{
.name = NULL,
},
o = &options[0];
while (o->name) {
- long_options[i].name = o->name;
+ long_options[i].name = (char *) o->name;
long_options[i].val = FIO_GETOPT_JOB;
if (o->type == FIO_OPT_STR_SET)
long_options[i].has_arg = no_argument;
}
}
-int fio_option_parse(struct thread_data *td, const char *opt)
+/*
+ * Parse all job options in 'opts'. Options are sorted first (via
+ * sort_options()) before being handed one-by-one to parse_option().
+ * Returns 0 if every option parsed cleanly, non-zero if any failed;
+ * note that all options are still attempted even after a failure.
+ */
+int fio_options_parse(struct thread_data *td, char **opts, int num_opts)
{
-	return parse_option(opt, options, td);
+	int i, ret;
+
+	sort_options(opts, options, num_opts);
+
+	for (ret = 0, i = 0; i < num_opts; i++)
+		ret |= parse_option(opts[i], options, td);
+
+	return ret;
}
int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)