allows a range of CPUs. Say you wanted a binding to CPUs
1, 5, and 8-15, you would set cpus_allowed=1,5,8-15.
+cpus_allowed_policy=str Set the policy of how fio distributes the CPUs
+ specified by cpus_allowed or cpumask. Two policies are
+ supported:
+
+ shared All jobs will share the CPU set specified.
+ split Each job will get a unique CPU from the CPU set.
+
+ 'shared' is the default behaviour, if the option isn't
+ specified. If 'split' is specified, then fio will error out if
+ there are more jobs defined than CPUs given in the set.
+
numa_cpu_nodes=str Set this job running on specified NUMA nodes' CPUs. The
arguments allow comma delimited list of cpu numbers,
A-B ranges, or 'all'. Note, to enable numa options support,
* allocations.
*/
if (o->cpumask_set) {
+ if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
+ ret = fio_cpus_split(&o->cpumask, td->thread_number);
+ if (!ret) {
+ log_err("fio: no CPUs set\n");
+ log_err("fio: Try increasing number of available CPUs\n");
+ td_verror(td, EINVAL, "cpus_split");
+ goto err;
+ }
+ }
ret = fio_setaffinity(td->pid, o->cpumask);
if (ret == -1) {
td_verror(td, errno, "cpu_set_affinity");
o->numjobs = le32_to_cpu(top->numjobs);
o->cpumask_set = le32_to_cpu(top->cpumask_set);
o->verify_cpumask_set = le32_to_cpu(top->verify_cpumask_set);
+ o->cpus_allowed_policy = le32_to_cpu(top->cpus_allowed_policy);
o->iolog = le32_to_cpu(top->iolog);
o->rwmixcycle = le32_to_cpu(top->rwmixcycle);
o->nice = le32_to_cpu(top->nice);
top->numjobs = cpu_to_le32(o->numjobs);
top->cpumask_set = cpu_to_le32(o->cpumask_set);
top->verify_cpumask_set = cpu_to_le32(o->verify_cpumask_set);
+ top->cpus_allowed_policy = cpu_to_le32(o->cpus_allowed_policy);
top->iolog = cpu_to_le32(o->iolog);
top->rwmixcycle = cpu_to_le32(o->rwmixcycle);
top->nice = cpu_to_le32(o->nice);
.BI cpus_allowed \fR=\fPstr
Same as \fBcpumask\fR, but allows a comma-delimited list of CPU numbers.
.TP
+.BI cpus_allowed_policy \fR=\fPstr
+Set the policy of how fio distributes the CPUs specified by \fBcpus_allowed\fR
+or \fBcpumask\fR. Two policies are supported:
+.RS
+.RS
+.TP
+.B shared
+All jobs will share the CPU set specified.
+.TP
+.B split
+Each job will get a unique CPU from the CPU set.
+.RE
+.P
+\fBshared\fR is the default behaviour, if the option isn't specified. If
+\fBsplit\fR is specified, then fio will error out if there are more jobs
+defined than CPUs given in the set.
+.RE
+.P
+.TP
.BI numa_cpu_nodes \fR=\fPstr
Set this job running on specified NUMA nodes' CPUs. The arguments allow
comma delimited list of cpu numbers, A-B ranges, or 'all'.
FIO_RAND_GEN_LFSR,
};
+/* Policies for distributing the cpus_allowed/cpumask set among jobs */
+enum {
+	FIO_CPUS_SHARED	= 0,	/* all jobs share the full CPU set (default) */
+	FIO_CPUS_SPLIT,		/* each job is given one unique CPU from the set */
+};
+
#endif
}
#ifdef FIO_HAVE_CPU_AFFINITY
+/*
+ * Reduce *mask so that it contains at most the single CPU whose index
+ * matches 'cpu'; every other online CPU is cleared from the set.
+ *
+ * Returns the number of CPUs remaining in the mask (via fio_cpu_count()),
+ * so 0 means 'cpu' was not part of the original set and the caller
+ * should treat it as an error.
+ */
+int fio_cpus_split(os_cpu_mask_t *mask, unsigned int cpu)
+{
+	const long max_cpu = cpus_online();
+	long i;
+
+	/*
+	 * Use a signed index of the same width as max_cpu to avoid a
+	 * signed/unsigned comparison in the loop condition.
+	 */
+	for (i = 0; i < max_cpu; i++) {
+		if (i != (long) cpu)
+			fio_cpu_clear(mask, i);
+	}
+
+	return fio_cpu_count(mask);
+}
+
static int str_cpumask_cb(void *data, unsigned long long *val)
{
struct thread_data *td = data;
.category = FIO_OPT_C_GENERAL,
.group = FIO_OPT_G_CRED,
},
+ {
+ .name = "cpus_allowed_policy",
+ .lname = "CPUs allowed distribution policy",
+ .type = FIO_OPT_STR,
+ .off1 = td_var_offset(cpus_allowed_policy),
+ .help = "Distribution policy for cpus_allowed",
+ .parent = "cpus_allowed",
+ .prio = 1,
+ .posval = {
+ { .ival = "shared",
+ .oval = FIO_CPUS_SHARED,
+ .help = "Mask shared between threads",
+ },
+ { .ival = "split",
+ .oval = FIO_CPUS_SPLIT,
+ .help = "Mask split between threads",
+ },
+ },
+ .category = FIO_OPT_C_GENERAL,
+ .group = FIO_OPT_G_CRED,
+ },
#endif
#ifdef CONFIG_LIBNUMA
{
#define fio_cpu_clear(mask, cpu) (void) CPU_CLR((cpu), (mask))
#define fio_cpu_set(mask, cpu) (void) CPU_SET((cpu), (mask))
+/* number of CPUs set in the mask; parameter was misspelled "maks" */
+#define fio_cpu_count(mask) CPU_COUNT((mask))
static inline int fio_cpuset_init(os_cpu_mask_t *mask)
{
#define fio_cpu_clear(mask, cpu) (void) CPU_CLR((cpu), (mask))
#define fio_cpu_set(mask, cpu) (void) CPU_SET((cpu), (mask))
+/* number of CPUs set in the mask; parameter was misspelled "maks" */
+#define fio_cpu_count(mask) CPU_COUNT((mask))
static inline int fio_cpuset_init(os_cpu_mask_t *mask)
{
return 0;
}
+/*
+ * Number of CPUs in the processor set. Named fio_cpu_count() to match
+ * the interface provided by the other platform headers and used by
+ * fio_cpus_split(); the original fio_cpuset_count() name had no callers.
+ */
+static inline int fio_cpu_count(os_cpu_mask_t *mask)
+{
+	unsigned int num_cpus;
+
+	/* On pset_info() failure, report an empty set */
+	if (pset_info(*mask, NULL, &num_cpus, NULL) < 0)
+		return 0;
+
+	return num_cpus;
+}
+
static inline int fio_cpuset_exit(os_cpu_mask_t *mask)
{
if (pset_destroy(*mask) < 0)
*mask |= 1 << cpu;
}
+/*
+ * Number of CPUs in the mask: one bit per CPU in the 64-bit word.
+ * The spurious, unused 'cpu' parameter is dropped — the call site and
+ * the other platform variants all take a single mask argument.
+ */
+static inline int fio_cpu_count(os_cpu_mask_t *mask)
+{
+	return hweight64(*mask);
+}
+
static inline int fio_cpuset_init(os_cpu_mask_t *mask)
{
*mask = 0;
#define fio_getaffinity(pid, mask) do { } while (0)
#define fio_cpu_clear(mask, cpu) do { } while (0)
#define fio_cpuset_exit(mask) (-1)
+#define fio_cpus_split(mask, cpu) (0)
typedef unsigned long os_cpu_mask_t;
+#else
+extern int fio_cpus_split(os_cpu_mask_t *mask, unsigned int cpu);
#endif
#ifndef FIO_HAVE_IOPRIO
};
enum {
- FIO_SERVER_VER = 31,
+ FIO_SERVER_VER = 32,
FIO_SERVER_MAX_FRAGMENT_PDU = 1024,
unsigned int cpumask_set;
os_cpu_mask_t verify_cpumask;
unsigned int verify_cpumask_set;
+ unsigned int cpus_allowed_policy;
#ifdef CONFIG_LIBNUMA
struct bitmask *numa_cpunodesmask;
unsigned int numa_cpumask_set;
uint32_t cpumask_set;
uint8_t verify_cpumask[FIO_TOP_STR_MAX];
uint32_t verify_cpumask_set;
+ uint32_t cpus_allowed_policy;
uint32_t iolog;
uint32_t rwmixcycle;
uint32_t rwmix[DDIR_RWDIR_CNT];