We can just check the option set mask via fio_option_is_set() now, instead of carrying separate *_set shadow flags in the options structure.
Signed-off-by: Jens Axboe <axboe@fb.com>
* Set affinity first, in case it has an impact on the memory
* allocations.
*/
- if (o->cpumask_set) {
+ if (fio_option_is_set(o, cpumask)) {
if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
if (!ret) {
#ifdef CONFIG_LIBNUMA
/* numa node setup */
- if (o->numa_cpumask_set || o->numa_memmask_set) {
+ if (fio_option_is_set(o, numa_cpunodes) ||
+ fio_option_is_set(o, numa_memnodes)) {
struct bitmask *mask;
if (numa_available() < 0) {
goto err;
}
- if (o->numa_cpumask_set) {
+ if (fio_option_is_set(o, numa_cpunodes)) {
mask = numa_parse_nodestring(o->numa_cpunodes);
ret = numa_run_on_node_mask(mask);
numa_free_nodemask(mask);
}
}
- if (o->numa_memmask_set) {
-
+ if (fio_option_is_set(o, numa_memnodes)) {
mask = NULL;
if (o->numa_memnodes)
mask = numa_parse_nodestring(o->numa_memnodes);
cgroup_shutdown(td, &cgroup_mnt);
verify_free_state(td);
- if (o->cpumask_set) {
+ if (fio_option_is_set(o, cpumask)) {
ret = fio_cpuset_exit(&o->cpumask);
if (ret)
td_verror(td, ret, "fio_cpuset_exit");
o->stonewall = le32_to_cpu(top->stonewall);
o->new_group = le32_to_cpu(top->new_group);
o->numjobs = le32_to_cpu(top->numjobs);
- o->cpumask_set = le32_to_cpu(top->cpumask_set);
- o->verify_cpumask_set = le32_to_cpu(top->verify_cpumask_set);
o->cpus_allowed_policy = le32_to_cpu(top->cpus_allowed_policy);
o->iolog = le32_to_cpu(top->iolog);
o->rwmixcycle = le32_to_cpu(top->rwmixcycle);
top->stonewall = cpu_to_le32(o->stonewall);
top->new_group = cpu_to_le32(o->new_group);
top->numjobs = cpu_to_le32(o->numjobs);
- top->cpumask_set = cpu_to_le32(o->cpumask_set);
- top->verify_cpumask_set = cpu_to_le32(o->verify_cpumask_set);
top->cpus_allowed_policy = cpu_to_le32(o->cpus_allowed_policy);
top->iolog = cpu_to_le32(o->iolog);
top->rwmixcycle = cpu_to_le32(o->rwmixcycle);
}
}
- td->o.cpumask_set = 1;
return 0;
}
}
free(p);
- if (!ret)
- td->o.cpumask_set = 1;
return ret;
}
static int str_cpus_allowed_cb(void *data, const char *input)
{
struct thread_data *td = data;
- int ret;
if (parse_dryrun())
return 0;
- ret = set_cpus_allowed(td, &td->o.cpumask, input);
- if (!ret)
- td->o.cpumask_set = 1;
-
- return ret;
+ return set_cpus_allowed(td, &td->o.cpumask, input);
}
static int str_verify_cpus_allowed_cb(void *data, const char *input)
{
struct thread_data *td = data;
- int ret;
- ret = set_cpus_allowed(td, &td->o.verify_cpumask, input);
- if (!ret)
- td->o.verify_cpumask_set = 1;
-
- return ret;
+ return set_cpus_allowed(td, &td->o.verify_cpumask, input);
}
#endif
numa_free_nodemask(verify_bitmask);
td->o.numa_cpunodes = strdup(input);
- td->o.numa_cpumask_set = 1;
return 0;
}
break;
}
- td->o.numa_memmask_set = 1;
return 0;
-
out:
return 1;
}
.name = "numa_cpu_nodes",
.type = FIO_OPT_STR,
.cb = str_numa_cpunodes_cb,
+ .off1 = td_var_offset(numa_cpunodes),
.help = "NUMA CPU nodes bind",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
.name = "numa_mem_policy",
.type = FIO_OPT_STR,
.cb = str_numa_mpol_cb,
+ .off1 = td_var_offset(numa_memnodes),
.help = "NUMA memory policy setup",
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_INVALID,
unsigned int new_group;
unsigned int numjobs;
os_cpu_mask_t cpumask;
- unsigned int cpumask_set;
os_cpu_mask_t verify_cpumask;
- unsigned int verify_cpumask_set;
unsigned int cpus_allowed_policy;
char *numa_cpunodes;
- unsigned int numa_cpumask_set;
unsigned short numa_mem_mode;
unsigned int numa_mem_prefer_node;
char *numa_memnodes;
- unsigned int numa_memmask_set;
unsigned int iolog;
unsigned int rwmixcycle;
unsigned int rwmix[DDIR_RWDIR_CNT];
uint32_t experimental_verify;
uint32_t verify_state;
uint32_t verify_state_save;
- uint32_t pad;
uint32_t use_thread;
uint32_t unlink;
uint32_t do_disk_util;
uint32_t bs_is_seq_rand;
uint32_t random_distribution;
+ uint32_t pad;
fio_fp64_t zipf_theta;
fio_fp64_t pareto_h;
uint32_t new_group;
uint32_t numjobs;
uint8_t cpumask[FIO_TOP_STR_MAX];
- uint32_t cpumask_set;
uint8_t verify_cpumask[FIO_TOP_STR_MAX];
- uint32_t verify_cpumask_set;
uint32_t cpus_allowed_policy;
uint32_t iolog;
uint32_t rwmixcycle;
struct io_u *io_u;
int ret = 0;
- if (td->o.verify_cpumask_set &&
+ if (fio_option_is_set(&td->o, verify_cpumask) &&
fio_setaffinity(td->pid, td->o.verify_cpumask)) {
log_err("fio: failed setting verify thread affinity\n");
goto done;