#include <getopt.h>
#include <assert.h>
#include <libgen.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
#include "fio.h"
+#include "verify.h"
#include "parse.h"
-#include "fls.h"
+#include "lib/fls.h"
#define td_var_offset(var) ((size_t) &((struct thread_options *)0)->var)
return bsp1->perc < bsp2->perc;
}
-static int str_bssplit_cb(void *data, const char *input)
+static int bssplit_ddir(struct thread_data *td, int ddir, char *str)
{
- struct thread_data *td = data;
- char *fname, *str, *p;
+ struct bssplit *bssplit;
unsigned int i, perc, perc_missing;
unsigned int max_bs, min_bs;
long long val;
+ char *fname;
- p = str = strdup(input);
-
- strip_blank_front(&str);
- strip_blank_end(str);
-
- td->o.bssplit_nr = 4;
- td->o.bssplit = malloc(4 * sizeof(struct bssplit));
+ td->o.bssplit_nr[ddir] = 4;
+ bssplit = malloc(4 * sizeof(struct bssplit));
i = 0;
max_bs = 0;
/*
* grow struct buffer, if needed
*/
- if (i == td->o.bssplit_nr) {
- td->o.bssplit_nr <<= 1;
- td->o.bssplit = realloc(td->o.bssplit,
- td->o.bssplit_nr
+ if (i == td->o.bssplit_nr[ddir]) {
+ td->o.bssplit_nr[ddir] <<= 1;
+ bssplit = realloc(bssplit, td->o.bssplit_nr[ddir]
* sizeof(struct bssplit));
}
if (val < min_bs)
min_bs = val;
- td->o.bssplit[i].bs = val;
- td->o.bssplit[i].perc = perc;
+ bssplit[i].bs = val;
+ bssplit[i].perc = perc;
i++;
}
- td->o.bssplit_nr = i;
+ td->o.bssplit_nr[ddir] = i;
/*
* Now check if the percentages add up, and how much is missing
*/
perc = perc_missing = 0;
- for (i = 0; i < td->o.bssplit_nr; i++) {
- struct bssplit *bsp = &td->o.bssplit[i];
+ for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+ struct bssplit *bsp = &bssplit[i];
if (bsp->perc == (unsigned char) -1)
perc_missing++;
if (perc > 100) {
log_err("fio: bssplit percentages add to more than 100%%\n");
- free(td->o.bssplit);
+ free(bssplit);
return 1;
}
/*
* them.
*/
if (perc_missing) {
- for (i = 0; i < td->o.bssplit_nr; i++) {
- struct bssplit *bsp = &td->o.bssplit[i];
+ for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+ struct bssplit *bsp = &bssplit[i];
if (bsp->perc == (unsigned char) -1)
bsp->perc = (100 - perc) / perc_missing;
}
}
- td->o.min_bs[DDIR_READ] = td->o.min_bs[DDIR_WRITE] = min_bs;
- td->o.max_bs[DDIR_READ] = td->o.max_bs[DDIR_WRITE] = max_bs;
+ td->o.min_bs[ddir] = min_bs;
+ td->o.max_bs[ddir] = max_bs;
/*
* now sort based on percentages, for ease of lookup
*/
- qsort(td->o.bssplit, td->o.bssplit_nr, sizeof(struct bssplit), bs_cmp);
+ qsort(bssplit, td->o.bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
+ td->o.bssplit[ddir] = bssplit;
+ return 0;
+
+}
+
+static int str_bssplit_cb(void *data, const char *input)
+{
+ struct thread_data *td = data;
+ char *str, *p, *odir;
+ int ret = 0;
+
+ p = str = strdup(input);
+
+ strip_blank_front(&str);
+ strip_blank_end(str);
+
+ odir = strchr(str, ',');
+ if (odir) {
+ ret = bssplit_ddir(td, DDIR_WRITE, odir + 1);
+ if (!ret) {
+ *odir = '\0';
+ ret = bssplit_ddir(td, DDIR_READ, str);
+ }
+ } else {
+ char *op;
+
+ op = strdup(str);
+
+ ret = bssplit_ddir(td, DDIR_READ, str);
+ if (!ret)
+ ret = bssplit_ddir(td, DDIR_WRITE, op);
+
+ free(op);
+ }
free(p);
- return 0;
+ return ret;
}
static int str_rw_cb(void *data, const char *str)
char *nr = get_opt_postfix(str);
td->o.ddir_nr = 1;
- if (nr)
+ if (nr) {
td->o.ddir_nr = atoi(nr);
+ free(nr);
+ }
return 0;
}
{
struct thread_data *td = data;
unsigned int i;
+ long max_cpu;
+ int ret;
- CPU_ZERO(&td->o.cpumask);
+ ret = fio_cpuset_init(&td->o.cpumask);
+ if (ret < 0) {
+ log_err("fio: cpuset_init failed\n");
+ td_verror(td, ret, "fio_cpuset_init");
+ return 1;
+ }
- for (i = 0; i < sizeof(int) * 8; i++)
- if ((1 << i) & *val)
- CPU_SET(*val, &td->o.cpumask);
+ max_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+
+ for (i = 0; i < sizeof(int) * 8; i++) {
+ if ((1 << i) & *val) {
+ if (i > max_cpu) {
+ log_err("fio: CPU %d too large (max=%ld)\n", i,
+ max_cpu);
+ return 1;
+ }
+ dprint(FD_PARSE, "set cpu allowed %d\n", i);
+ fio_cpu_set(&td->o.cpumask, i);
+ }
+ }
td->o.cpumask_set = 1;
return 0;
{
struct thread_data *td = data;
char *cpu, *str, *p;
+ long max_cpu;
+ int ret = 0;
- CPU_ZERO(&td->o.cpumask);
+ ret = fio_cpuset_init(&td->o.cpumask);
+ if (ret < 0) {
+ log_err("fio: cpuset_init failed\n");
+ td_verror(td, ret, "fio_cpuset_init");
+ return 1;
+ }
p = str = strdup(input);
strip_blank_front(&str);
strip_blank_end(str);
+ max_cpu = sysconf(_SC_NPROCESSORS_ONLN);
+
while ((cpu = strsep(&str, ",")) != NULL) {
+ char *str2, *cpu2;
+ int icpu, icpu2;
+
if (!strlen(cpu))
break;
- CPU_SET(atoi(cpu), &td->o.cpumask);
+
+ str2 = cpu;
+ icpu2 = -1;
+ while ((cpu2 = strsep(&str2, "-")) != NULL) {
+ if (!strlen(cpu2))
+ break;
+
+ icpu2 = atoi(cpu2);
+ }
+
+ icpu = atoi(cpu);
+ if (icpu2 == -1)
+ icpu2 = icpu;
+ while (icpu <= icpu2) {
+ if (icpu >= FIO_MAX_CPUS) {
+ log_err("fio: your OS only supports up to"
+ " %d CPUs\n", (int) FIO_MAX_CPUS);
+ ret = 1;
+ break;
+ }
+ if (icpu > max_cpu) {
+ log_err("fio: CPU %d too large (max=%ld)\n",
+ icpu, max_cpu);
+ ret = 1;
+ break;
+ }
+
+ dprint(FD_PARSE, "set cpu allowed %d\n", icpu);
+ fio_cpu_set(&td->o.cpumask, icpu);
+ icpu++;
+ }
+ if (ret)
+ break;
}
free(p);
- td->o.cpumask_set = 1;
- return 0;
+ if (!ret)
+ td->o.cpumask_set = 1;
+ return ret;
}
#endif
char *nr = get_opt_postfix(str);
td->file_service_nr = 1;
- if (nr)
+ if (nr) {
td->file_service_nr = atoi(nr);
+ free(nr);
+ }
return 0;
}
struct thread_data *td = data;
unsigned int msb;
- msb = fls(*off);
+ msb = __fls(*off);
if (msb <= 8)
td->o.verify_pattern_bytes = 1;
else if (msb <= 16)
char *nr = get_opt_postfix(str);
td->o.lockfile_batch = 1;
- if (nr)
+ if (nr) {
td->o.lockfile_batch = atoi(nr);
+ free(nr);
+ }
+
+ return 0;
+}
+
+static int str_write_bw_log_cb(void *data, const char *str)
+{
+ struct thread_data *td = data;
+
+ if (str)
+ td->o.bw_log_file = strdup(str);
+ td->o.write_bw_log = 1;
+ return 0;
+}
+
+static int str_write_lat_log_cb(void *data, const char *str)
+{
+ struct thread_data *td = data;
+
+ if (str)
+ td->o.lat_log_file = strdup(str);
+
+ td->o.write_lat_log = 1;
+ return 0;
+}
+
+static int str_gtod_reduce_cb(void *data, int *il)
+{
+ struct thread_data *td = data;
+ int val = *il;
+
+ td->o.disable_clat = !!val;
+ td->o.disable_slat = !!val;
+ td->o.disable_bw = !!val;
+ if (val)
+ td->tv_cache_mask = 63;
+
+ return 0;
+}
+
+static int str_gtod_cpu_cb(void *data, int *il)
+{
+ struct thread_data *td = data;
+ int val = *il;
+
+ td->o.gtod_cpu = val;
+ td->o.gtod_offload = 1;
return 0;
}
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(filename),
.cb = str_filename_cb,
+ .prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
},
{
{ .ival = "posixaio",
.help = "POSIX asynchronous IO",
},
+#endif
+#ifdef FIO_HAVE_SOLARISAIO
+ { .ival = "solarisaio",
+ .help = "Solaris native asynchronous IO",
+ },
#endif
{ .ival = "mmap",
.help = "Memory mapped IO",
},
{
.name = "iodepth_batch",
+ .alias = "iodepth_batch_submit",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch),
- .help = "Number of IO to submit in one go",
+ .help = "Number of IO buffers to submit in one go",
.parent = "iodepth",
.minval = 1,
.def = "1",
},
+ {
+ .name = "iodepth_batch_complete",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(iodepth_batch_complete),
+ .help = "Number of IO buffers to retrieve in one go",
+ .parent = "iodepth",
+ .minval = 0,
+ .def = "1",
+ },
{
.name = "iodepth_low",
.type = FIO_OPT_INT,
.def = "4k",
.parent = "rw",
},
+ {
+ .name = "ba",
+ .alias = "blockalign",
+ .type = FIO_OPT_STR_VAL_INT,
+ .off1 = td_var_offset(ba[DDIR_READ]),
+ .off2 = td_var_offset(ba[DDIR_WRITE]),
+ .minval = 1,
+ .help = "IO block offset alignment",
+ .parent = "rw",
+ },
{
.name = "bsrange",
.alias = "blocksize_range",
.name = "softrandommap",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(softrandommap),
- .help = "Allow randommap to fail and continue witout",
+ .help = "Set norandommap if randommap allocation fails",
.parent = "norandommap",
.def = "0",
},
.oval = FIO_FSERVICE_RR,
.help = "Round robin select files",
},
+ { .ival = "sequential",
+ .oval = FIO_FSERVICE_SEQ,
+ .help = "Finish one file before moving to the next",
+ },
},
.parent = "nrfiles",
},
.off1 = td_var_offset(time_based),
.help = "Keep running until runtime/timeout is met",
},
+ {
+ .name = "ramp_time",
+ .type = FIO_OPT_STR_VAL_TIME,
+ .off1 = td_var_offset(ramp_time),
+ .help = "Ramp up time before measuring performance",
+ },
{
.name = "mem",
.alias = "iomem",
.oval = VERIFY_CRC32,
.help = "Use crc32 checksums for verification",
},
+ { .ival = "crc32c-intel",
+ .oval = VERIFY_CRC32C_INTEL,
+ .help = "Use hw crc32c checksums for verification",
+ },
+ { .ival = "crc32c",
+ .oval = VERIFY_CRC32C,
+ .help = "Use crc32c checksums for verification",
+ },
{ .ival = "crc16",
.oval = VERIFY_CRC16,
.help = "Use crc16 checksums for verification",
.help = "Fsync file after creation",
.def = "1",
},
+ {
+ .name = "create_on_open",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(create_on_open),
+ .help = "Create files when they are opened for IO",
+ .def = "0",
+ },
+ {
+ .name = "pre_read",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(pre_read),
+ .help = "Preread files before starting official testing",
+ .def = "0",
+ },
{
.name = "cpuload",
.type = FIO_OPT_INT,
},
{
.name = "write_bw_log",
- .type = FIO_OPT_STR_SET,
+ .type = FIO_OPT_STR,
.off1 = td_var_offset(write_bw_log),
+ .cb = str_write_bw_log_cb,
.help = "Write log of bandwidth during run",
},
{
.name = "write_lat_log",
- .type = FIO_OPT_STR_SET,
+ .type = FIO_OPT_STR,
.off1 = td_var_offset(write_lat_log),
+ .cb = str_write_lat_log_cb,
.help = "Write log of latency during run",
},
{
.name = "disk_util",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(do_disk_util),
- .help = "Log disk utilization stats",
+ .help = "Log disk utilization statistics",
.def = "1",
},
#endif
+ {
+ .name = "gtod_reduce",
+ .type = FIO_OPT_BOOL,
+ .help = "Greatly reduce number of gettimeofday() calls",
+ .cb = str_gtod_reduce_cb,
+ .def = "0",
+ },
+ {
+ .name = "disable_clat",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(disable_clat),
+ .help = "Disable completion latency numbers",
+ .parent = "gtod_reduce",
+ .def = "0",
+ },
+ {
+ .name = "disable_slat",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(disable_slat),
+		.help	= "Disable submission latency numbers",
+ .parent = "gtod_reduce",
+ .def = "0",
+ },
+ {
+ .name = "disable_bw_measurement",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(disable_bw),
+ .help = "Disable bandwidth logging",
+ .parent = "gtod_reduce",
+ .def = "0",
+ },
+ {
+ .name = "gtod_cpu",
+ .type = FIO_OPT_INT,
+ .cb = str_gtod_cpu_cb,
+ .help = "Setup dedicated gettimeofday() thread on this CPU",
+ },
{
.name = NULL,
},
o = &options[0];
while (o->name) {
- long_options[i].name = o->name;
+ long_options[i].name = (char *) o->name;
long_options[i].val = FIO_GETOPT_JOB;
if (o->type == FIO_OPT_STR_SET)
long_options[i].has_arg = no_argument;
}
}
-int fio_option_parse(struct thread_data *td, const char *opt)
+int fio_options_parse(struct thread_data *td, char **opts, int num_opts)
{
- return parse_option(opt, options, td);
+ int i, ret;
+
+ sort_options(opts, options, num_opts);
+
+ for (ret = 0, i = 0; i < num_opts; i++)
+ ret |= parse_option(opts[i], options, td);
+
+ return ret;
}
int fio_cmd_option_parse(struct thread_data *td, const char *opt, char *val)