#include <sys/stat.h>
#include "fio.h"
+#include "verify.h"
#include "parse.h"
#include "lib/fls.h"
return bsp1->perc < bsp2->perc;
}
-static int str_bssplit_cb(void *data, const char *input)
+static int bssplit_ddir(struct thread_data *td, int ddir, char *str)
{
- struct thread_data *td = data;
- char *fname, *str, *p;
+ struct bssplit *bssplit;
unsigned int i, perc, perc_missing;
unsigned int max_bs, min_bs;
long long val;
+ char *fname;
- p = str = strdup(input);
-
- strip_blank_front(&str);
- strip_blank_end(str);
-
- td->o.bssplit_nr = 4;
- td->o.bssplit = malloc(4 * sizeof(struct bssplit));
+ td->o.bssplit_nr[ddir] = 4;
+ bssplit = malloc(4 * sizeof(struct bssplit));
i = 0;
max_bs = 0;
/*
* grow struct buffer, if needed
*/
- if (i == td->o.bssplit_nr) {
- td->o.bssplit_nr <<= 1;
- td->o.bssplit = realloc(td->o.bssplit,
- td->o.bssplit_nr
+ if (i == td->o.bssplit_nr[ddir]) {
+ td->o.bssplit_nr[ddir] <<= 1;
+ bssplit = realloc(bssplit, td->o.bssplit_nr[ddir]
* sizeof(struct bssplit));
}
if (val < min_bs)
min_bs = val;
- td->o.bssplit[i].bs = val;
- td->o.bssplit[i].perc = perc;
+ bssplit[i].bs = val;
+ bssplit[i].perc = perc;
i++;
}
- td->o.bssplit_nr = i;
+ td->o.bssplit_nr[ddir] = i;
/*
* Now check if the percentages add up, and how much is missing
*/
perc = perc_missing = 0;
- for (i = 0; i < td->o.bssplit_nr; i++) {
- struct bssplit *bsp = &td->o.bssplit[i];
+ for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+ struct bssplit *bsp = &bssplit[i];
if (bsp->perc == (unsigned char) -1)
perc_missing++;
if (perc > 100) {
log_err("fio: bssplit percentages add to more than 100%%\n");
- free(td->o.bssplit);
+ free(bssplit);
return 1;
}
/*
* them.
*/
if (perc_missing) {
- for (i = 0; i < td->o.bssplit_nr; i++) {
- struct bssplit *bsp = &td->o.bssplit[i];
+ for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
+ struct bssplit *bsp = &bssplit[i];
if (bsp->perc == (unsigned char) -1)
bsp->perc = (100 - perc) / perc_missing;
}
}
- td->o.min_bs[DDIR_READ] = td->o.min_bs[DDIR_WRITE] = min_bs;
- td->o.max_bs[DDIR_READ] = td->o.max_bs[DDIR_WRITE] = max_bs;
+ td->o.min_bs[ddir] = min_bs;
+ td->o.max_bs[ddir] = max_bs;
/*
* now sort based on percentages, for ease of lookup
*/
- qsort(td->o.bssplit, td->o.bssplit_nr, sizeof(struct bssplit), bs_cmp);
+ qsort(bssplit, td->o.bssplit_nr[ddir], sizeof(struct bssplit), bs_cmp);
+ td->o.bssplit[ddir] = bssplit;
+ return 0;
+
+}
+
+static int str_bssplit_cb(void *data, const char *input)
+{
+ struct thread_data *td = data;
+ char *str, *p, *odir;
+ int ret = 0;
+
+ p = str = strdup(input);
+
+ strip_blank_front(&str);
+ strip_blank_end(str);
+
+ odir = strchr(str, ',');
+ if (odir) {
+ ret = bssplit_ddir(td, DDIR_WRITE, odir + 1);
+ if (!ret) {
+ *odir = '\0';
+ ret = bssplit_ddir(td, DDIR_READ, str);
+ }
+ } else {
+ char *op;
+
+ op = strdup(str);
+
+ ret = bssplit_ddir(td, DDIR_READ, str);
+ if (!ret)
+ ret = bssplit_ddir(td, DDIR_WRITE, op);
+
+ free(op);
+ }
free(p);
- return 0;
+ return ret;
}
static int str_rw_cb(void *data, const char *str)
return 0;
}
+static int rw_verify(struct fio_option *o, void *data)
+{
+ struct thread_data *td = data;
+
+ if (read_only && td_write(td)) {
+ log_err("fio: job <%s> has write bit set, but fio is in"
+ " read-only mode\n", td->o.name);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int gtod_cpu_verify(struct fio_option *o, void *data)
+{
+#ifndef FIO_HAVE_CPU_AFFINITY
+ struct thread_data *td = data;
+
+ if (td->o.gtod_cpu) {
+ log_err("fio: platform must support CPU affinity for "
+ "gettimeofday() offloading\n");
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)
.type = FIO_OPT_STR_STORE,
.off1 = td_var_offset(filename),
.cb = str_filename_cb,
- .prio = 1, /* must come before "directory" */
+ .prio = -1, /* must come after "directory" */
.help = "File(s) to use for the workload",
},
{
.off1 = td_var_offset(td_ddir),
.help = "IO direction",
.def = "read",
+ .verify = rw_verify,
.posval = {
{ .ival = "read",
.oval = TD_DDIR_READ,
.alias = "iodepth_batch_submit",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch),
- .help = "Number of IO to submit in one go",
+ .help = "Number of IO buffers to submit in one go",
.parent = "iodepth",
.minval = 1,
.def = "1",
.name = "iodepth_batch_complete",
.type = FIO_OPT_INT,
.off1 = td_var_offset(iodepth_batch_complete),
- .help = "Number of IO to retrieve in one go",
+ .help = "Number of IO buffers to retrieve in one go",
.parent = "iodepth",
.minval = 0,
.def = "1",
{
.name = "bs",
.alias = "blocksize",
- .type = FIO_OPT_STR_VAL_INT,
+ .type = FIO_OPT_INT,
.off1 = td_var_offset(bs[DDIR_READ]),
.off2 = td_var_offset(bs[DDIR_WRITE]),
.minval = 1,
.def = "4k",
.parent = "rw",
},
+ {
+ .name = "ba",
+ .alias = "blockalign",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(ba[DDIR_READ]),
+ .off2 = td_var_offset(ba[DDIR_WRITE]),
+ .minval = 1,
+ .help = "IO block offset alignment",
+ .parent = "rw",
+ },
{
.name = "bsrange",
.alias = "blocksize_range",
.oval = FIO_FSERVICE_RR,
.help = "Round robin select files",
},
+ { .ival = "sequential",
+ .oval = FIO_FSERVICE_SEQ,
+ .help = "Finish one file before moving to the next",
+ },
},
.parent = "nrfiles",
},
.help = "Issue fsync for writes every given number of blocks",
.def = "0",
},
+ {
+ .name = "fdatasync",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(fdatasync_blocks),
+ .help = "Issue fdatasync for writes every given number of blocks",
+ .def = "0",
+ },
{
.name = "direct",
.type = FIO_OPT_BOOL,
#endif
},
},
+ {
+ .name = "iomem_align",
+ .alias = "mem_align",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(mem_align),
+ .minval = 0,
+ .help = "IO memory buffer offset alignment",
+ .def = "0",
+ .parent = "iomem",
+ },
{
.name = "verify",
.type = FIO_OPT_STR,
},
{
.name = "verify_interval",
- .type = FIO_OPT_STR_VAL_INT,
+ .type = FIO_OPT_INT,
.off1 = td_var_offset(verify_interval),
.minval = 2 * sizeof(struct verify_header),
.help = "Store verify buffer header every N bytes",
},
{
.name = "verify_offset",
- .type = FIO_OPT_STR_VAL_INT,
+ .type = FIO_OPT_INT,
.help = "Offset verify header location by N bytes",
.def = "0",
.cb = str_verify_offset_cb,
{
.name = "rate",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate),
+ .off1 = td_var_offset(rate[0]),
+ .off2 = td_var_offset(rate[1]),
.help = "Set bandwidth rate",
},
{
.name = "ratemin",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(ratemin),
+ .off1 = td_var_offset(ratemin[0]),
+ .off2 = td_var_offset(ratemin[1]),
.help = "Job must meet this rate or it will be shutdown",
.parent = "rate",
},
{
.name = "rate_iops",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate_iops),
+ .off1 = td_var_offset(rate_iops[0]),
+ .off2 = td_var_offset(rate_iops[1]),
.help = "Limit IO used to this number of IO operations/sec",
},
{
.name = "rate_iops_min",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(rate_iops_min),
+ .off1 = td_var_offset(rate_iops_min[0]),
+ .off2 = td_var_offset(rate_iops_min[1]),
.help = "Job must meet this rate or it will be shutdown",
.parent = "rate_iops",
},
.help = "Fsync file after creation",
.def = "1",
},
+ {
+ .name = "create_on_open",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(create_on_open),
+ .help = "Create files when they are opened for IO",
+ .def = "0",
+ },
+ {
+ .name = "pre_read",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(pre_read),
+ .help = "Preread files before starting official testing",
+ .def = "0",
+ },
{
.name = "cpuload",
.type = FIO_OPT_INT,
},
{
.name = "hugepage-size",
- .type = FIO_OPT_STR_VAL_INT,
+ .type = FIO_OPT_INT,
.off1 = td_var_offset(hugepage_size),
.help = "When using hugepages, specify size of each page",
.def = __stringify(FIO_HUGE_PAGE),
.type = FIO_OPT_INT,
.cb = str_gtod_cpu_cb,
.help = "Setup dedicated gettimeofday() thread on this CPU",
+ .verify = gtod_cpu_verify,
+ },
+ {
+ .name = "continue_on_error",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(continue_on_error),
+ .help = "Continue on non-fatal errors during I/O",
+ .def = "0",
},
{
.name = NULL,