if (perc > 100)
perc = 100;
else if (!perc)
- perc = -1;
+ perc = -1U;
} else
- perc = -1;
+ perc = -1U;
if (str_to_decimal(fname, &val, 1, o, 0)) {
log_err("fio: bssplit conversion failed\n");
for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &bssplit[i];
- if (bsp->perc == (unsigned char) -1)
+ if (bsp->perc == -1U)
perc_missing++;
else
perc += bsp->perc;
}
- if (perc > 100) {
+ if (perc > 100 && perc_missing > 1) {
log_err("fio: bssplit percentages add to more than 100%%\n");
free(bssplit);
return 1;
}
+
/*
* If values didn't have a percentage set, divide the remains between
* them.
*/
if (perc_missing) {
+ if (perc_missing == 1 && o->bssplit_nr[ddir] == 1)
+ perc = 100;
for (i = 0; i < o->bssplit_nr[ddir]; i++) {
struct bssplit *bsp = &bssplit[i];
- if (bsp->perc == (unsigned char) -1)
+ if (bsp->perc == -1U)
bsp->perc = (100 - perc) / perc_missing;
}
}
} else {
error[i] = atoi(fname);
if (error[i] < 0)
- error[i] = error[i];
+ error[i] = -error[i];
}
if (!error[i]) {
log_err("Unknown error %s, please use number value \n",
td->o.continue_on_error |= 1 << etype;
td->o.ignore_error_nr[etype] = i;
td->o.ignore_error[etype] = error;
- }
+ } else
+ free(error);
+
return 0;
}
{
struct thread_data *td = data;
struct thread_options *o = &td->o;
- char *nr = get_opt_postfix(str);
+ char *nr;
if (parse_dryrun())
return 0;
o->ddir_seq_nr = 1;
o->ddir_seq_add = 0;
+ nr = get_opt_postfix(str);
if (!nr)
return 0;
}
#ifdef FIO_HAVE_CPU_AFFINITY
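+/*
+ * Reduce *mask to the single CPU that this thread index maps to
+ * (cpu_index modulo the number of CPUs set in the mask), clearing
+ * every other bit. Returns the number of CPUs left in the mask.
+ */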
+int fio_cpus_split(os_cpu_mask_t *mask, unsigned int cpu_index)
+{
+ unsigned int i, index, cpus_in_mask;
+ const long max_cpu = cpus_online();
+
+ cpus_in_mask = fio_cpu_count(mask);
+ cpu_index = cpu_index % cpus_in_mask;
+
+ index = 0;
+ for (i = 0; i < max_cpu; i++) {
+ if (!fio_cpu_isset(mask, i))
+ continue;
+
+ if (cpu_index != index)
+ fio_cpu_clear(mask, i);
+
+ index++;
+ }
+
+ return fio_cpu_count(mask);
+}
+
static int str_cpumask_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
static int str_numa_cpunodes_cb(void *data, char *input)
{
struct thread_data *td = data;
+ struct bitmask *verify_bitmask;
if (parse_dryrun())
return 0;
* numa_allocate_nodemask(), so it should be freed by
* numa_free_nodemask().
*/
- td->o.numa_cpunodesmask = numa_parse_nodestring(input);
- if (td->o.numa_cpunodesmask == NULL) {
+ verify_bitmask = numa_parse_nodestring(input);
+ if (verify_bitmask == NULL) {
log_err("fio: numa_parse_nodestring failed\n");
td_verror(td, 1, "str_numa_cpunodes_cb");
return 1;
}
+ numa_free_nodemask(verify_bitmask);
+ td->o.numa_cpunodes = strdup(input);
td->o.numa_cpumask_set = 1;
return 0;
}
{ "default", "prefer", "bind", "interleave", "local", NULL };
int i;
char *nodelist;
+ struct bitmask *verify_bitmask;
if (parse_dryrun())
return 0;
break;
case MPOL_INTERLEAVE:
case MPOL_BIND:
- td->o.numa_memnodesmask = numa_parse_nodestring(nodelist);
- if (td->o.numa_memnodesmask == NULL) {
+ verify_bitmask = numa_parse_nodestring(nodelist);
+ if (verify_bitmask == NULL) {
log_err("fio: numa_parse_nodestring failed\n");
td_verror(td, 1, "str_numa_memnodes_cb");
return 1;
}
+ td->o.numa_memnodes = strdup(nodelist);
+ numa_free_nodemask(verify_bitmask);
+
break;
case MPOL_LOCAL:
case MPOL_DEFAULT:
while ((fname = get_next_name(&str)) != NULL) {
if (!strlen(fname))
break;
- add_file(td, fname, 0);
- td->o.nr_files++;
+ add_file(td, fname, 0, 1);
}
free(p);
uint32_t pattern_length;
char *loc1, *loc2;
+ /*
+ * Check if it's a string input
+ */
+ loc1 = strchr(input, '\"');
+ if (loc1) {
+ do {
+ loc1++;
+ if (*loc1 == '\0' || *loc1 == '\"')
+ break;
+
+ pattern[i] = *loc1;
+ i++;
+ } while (i < max_size);
+
+ if (!i)
+ return 1;
+
+ goto fill;
+ }
+
+ /*
+ * No string, find out if it's decimal or hexadecimal
+ */
loc1 = strstr(input, "0x");
loc2 = strstr(input, "0X");
if (loc1 || loc2)
* Fill the pattern all the way to the end. This greatly reduces
* the number of memcpy's we have to do when verifying the IO.
*/
+fill:
pattern_length = i;
while (i > 1 && i * 2 <= max_size) {
memcpy(&pattern[i], &pattern[0], i);
if (i == 1) {
/*
- * The code in verify_io_u_pattern assumes a single byte pattern
- * fills the whole verify pattern buffer.
+ * The code in verify_io_u_pattern assumes a single byte
+ * pattern fills the whole verify pattern buffer.
*/
memset(pattern, pattern[0], max_size);
}
ret = pattern_cb(td->o.buffer_pattern, MAX_PATTERN_SIZE, input,
&td->o.buffer_pattern_bytes);
- if (!ret) {
+ if (!ret && td->o.buffer_pattern_bytes) {
td->o.refill_buffers = 0;
td->o.scramble_buffers = 0;
td->o.zero_buffers = 0;
+ } else {
+ log_err("fio: failed parsing pattern `%s`\n", input);
+ ret = 1;
}
return ret;
}
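+/*
+ * Flag the job for compressible buffer generation at the given percentage.
+ */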
+static int str_buffer_compress_cb(void *data, unsigned long long *il)
+{
+ struct thread_data *td = data;
+
+ td->flags |= TD_F_COMPRESS;
+ td->o.compress_percentage = *il;
+ return 0;
+}
+
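+/*
+ * Dedupe reuses the compression buffer setup and forces refill_buffers,
+ * so buffer contents are regenerated for each IO that is submitted.
+ */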
+static int str_dedupe_cb(void *data, unsigned long long *il)
+{
+ struct thread_data *td = data;
+
+ td->flags |= TD_F_COMPRESS;
+ td->o.dedupe_percentage = *il;
+ td->o.refill_buffers = 1;
+ return 0;
+}
+
static int str_verify_pattern_cb(void *data, const char *input)
{
struct thread_data *td = data;
{ .ival = "falloc",
.help = "fallocate() file based engine",
},
+#endif
+#ifdef CONFIG_GFAPI
+ { .ival = "gfapi",
+ .help = "Glusterfs libgfapi(sync) based engine"
+ },
+ { .ival = "gfapi_async",
+ .help = "Glusterfs libgfapi(async) based engine"
+ },
+#endif
+#ifdef CONFIG_LIBHDFS
+ { .ival = "libhdfs",
+ .help = "Hadoop Distributed Filesystem (HDFS) engine"
+ },
#endif
{ .ival = "external",
.help = "Load external engine (append name)",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "io_limit",
+ .lname = "IO Limit",
+ .type = FIO_OPT_STR_VAL,
+ .off1 = td_var_offset(io_limit),
+ .interval = 1024 * 1024,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "fill_device",
.lname = "Fill device",
.category = FIO_OPT_C_FILE,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "file_append",
+ .lname = "File append",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(file_append),
+ .help = "IO will start at the end of the file(s)",
+ .def = "0",
+ .category = FIO_OPT_C_FILE,
+ .group = FIO_OPT_G_INVALID,
+ },
{
.name = "offset",
.lname = "IO offset",
.lname = "Number of IOs to perform",
.type = FIO_OPT_STR_VAL,
.off1 = td_var_offset(number_ios),
- .help = "Force job completion of this number of IOs",
+ .help = "Force job completion after this number of IOs",
.def = "0",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_INVALID,
.lname = "Block size division is seq/random (not read/write)",
.type = FIO_OPT_BOOL,
.off1 = td_var_offset(bs_is_seq_rand),
- .help = "Consider any blocksize setting to be sequential,ramdom",
+ .help = "Consider any blocksize setting to be sequential,random",
.def = "0",
.parent = "blocksize",
.category = FIO_OPT_C_IO,
.oval = MEM_MALLOC,
.help = "Use malloc(3) for IO buffers",
},
+#ifndef CONFIG_NO_SHM
{ .ival = "shm",
.oval = MEM_SHM,
.help = "Use shared memory segments for IO buffers",
.oval = MEM_SHMHUGE,
.help = "Like shm, but use huge pages",
},
+#endif
#endif
{ .ival = "mmap",
.oval = MEM_MMAP,
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
},
+ {
+ .name = "cpus_allowed_policy",
+ .lname = "CPUs allowed distribution policy",
+ .type = FIO_OPT_STR,
+ .off1 = td_var_offset(cpus_allowed_policy),
+ .help = "Distribution policy for cpus_allowed",
+ .parent = "cpus_allowed",
+ .prio = 1,
+ .posval = {
+ { .ival = "shared",
+ .oval = FIO_CPUS_SHARED,
+ .help = "Mask shared between threads",
+ },
+ { .ival = "split",
+ .oval = FIO_CPUS_SPLIT,
+ .help = "Mask split between threads",
+ },
+ },
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
+ },
#endif
#ifdef CONFIG_LIBNUMA
{
.type = FIO_OPT_STR_SET,
.off1 = td_var_offset(use_thread),
.help = "Use threads instead of processes",
+#ifdef CONFIG_NO_SHM
+ .def = "1",
+ .no_warn_def = 1,
+#endif
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_PROCESS,
},
.category = FIO_OPT_C_LOG,
.group = FIO_OPT_G_INVALID,
},
+ {
+ .name = "log_offset",
+ .lname = "Log offset of IO",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(log_offset),
+ .help = "Include offset of IO for each log entry",
+ .def = "0",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+#ifdef CONFIG_ZLIB
+ {
+ .name = "log_compression",
+ .lname = "Log compression",
+ .type = FIO_OPT_INT,
+ .off1 = td_var_offset(log_gz),
+ .help = "Log in compressed chunks of this size",
+ .minval = 32 * 1024 * 1024ULL,
+ .maxval = 512 * 1024 * 1024ULL,
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+ {
+ .name = "log_store_compressed",
+ .lname = "Log store compressed",
+ .type = FIO_OPT_BOOL,
+ .off1 = td_var_offset(log_gz_store),
+ .help = "Store logs in a compressed format",
+ .category = FIO_OPT_C_LOG,
+ .group = FIO_OPT_G_INVALID,
+ },
+#endif
{
.name = "bwavgtime",
.lname = "Bandwidth average time",
.name = "buffer_compress_percentage",
.lname = "Buffer compression percentage",
.type = FIO_OPT_INT,
- .off1 = td_var_offset(compress_percentage),
+ .cb = str_buffer_compress_cb,
.maxval = 100,
.minval = 0,
.help = "How compressible the buffer is (approximately)",
.category = FIO_OPT_C_IO,
.group = FIO_OPT_G_IO_BUF,
},
+ {
+ .name = "dedupe_percentage",
+ .lname = "Dedupe percentage",
+ .type = FIO_OPT_INT,
+ .cb = str_dedupe_cb,
+ .maxval = 100,
+ .minval = 0,
+ .help = "Percentage of buffers that are dedupable",
+ .interval = 1,
+ .category = FIO_OPT_C_IO,
+ .group = FIO_OPT_G_IO_BUF,
+ },
{
.name = "clat_percentiles",
.lname = "Completion latency percentiles",
return NULL;
ret = fread(&buf[tmp - str], 1, 128 - (tmp - str), f);
- if (ret <= 0)
+ if (ret <= 0) {
+ pclose(f);
return NULL;
+ }
pclose(f);
buf[(tmp - str) + ret - 1] = '\0';
void fio_fill_default_options(struct thread_data *td)
{
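+ /*
+ * Stamp real thread_options with OPT_MAGIC, so code handed a bare
+ * options pointer (like the kb_base lookup below) can tell it apart
+ * from private option data.
+ */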
+ td->o.magic = OPT_MAGIC;
fill_default_options(td, fio_options);
}
struct thread_options *o = data;
unsigned int kb_base = 0;
- if (o)
+ /*
+ * This is a hack... For private options, *data is not holding
+ * a pointer to the thread_options, but to private data. That means
+ * we can't safely dereference it, but since the magic field comes
+ * first, the access is still valid memory-wise. It also means that
+ * if the job sets kb_base first and expects private options to honor
+ * it, it will be disappointed; we return the global default instead.
+ */
+ if (o && o->magic == OPT_MAGIC)
kb_base = o->kb_base;
if (!kb_base)
kb_base = 1024;