// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 Thomas Gleixner.
 * Copyright (C) 2016-2017 Christoph Hellwig.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sort.h>
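/*
 * Move @cpus_per_grp CPUs from @nmsk to @irqmsk, preferring to place SMT
 * siblings of an already selected CPU into the same group.
 */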
static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				unsigned int cpus_per_grp)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	for ( ; cpus_per_grp > 0; ) {
		cpu = cpumask_first(nmsk);

		/* Should not happen, but I'm too lazy to think about it */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_grp--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_grp > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_grp--;
		}
	}
}
static cpumask_var_t *alloc_node_to_cpumask(void)
{
	cpumask_var_t *masks;
	int node;

	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (node = 0; node < nr_node_ids; node++) {
		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
			goto out_unwind;
	}

	return masks;

out_unwind:
	while (--node >= 0)
		free_cpumask_var(masks[node]);
	kfree(masks);
	return NULL;
}
static void free_node_to_cpumask(cpumask_var_t *masks)
{
	int node;

	for (node = 0; node < nr_node_ids; node++)
		free_cpumask_var(masks[node]);
	kfree(masks);
}
static void build_node_to_cpumask(cpumask_var_t *masks)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}
static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
				const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes = 0;

	/* Calculate the number of nodes in the supplied affinity mask */
	for_each_node(n) {
		if (cpumask_intersects(mask, node_to_cpumask[n])) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}

struct node_groups {
	unsigned id;

	/*
	 * ncpus and ngroups share storage: the active CPU count is recorded
	 * first and later replaced by the allocated group count.
	 */
	union {
		unsigned ngroups;
		unsigned ncpus;
	};
};
static int ncpus_cmp_func(const void *l, const void *r)
{
	const struct node_groups *ln = l;
	const struct node_groups *rn = r;

	return ln->ncpus - rn->ncpus;
}
/*
 * Allocate group number for each node, so that for each node:
 *
 * 1) the allocated number is >= 1
 *
 * 2) the allocated number is <= active CPU number of this node
 *
 * The actual allocated total groups may be less than @numgrps when
 * the active total CPU number is less than @numgrps.
 *
 * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
 * for each node.
 */
static void alloc_nodes_groups(unsigned int numgrps,
			       cpumask_var_t *node_to_cpumask,
			       const struct cpumask *cpu_mask,
			       const nodemask_t nodemsk,
			       struct cpumask *nmsk,
			       struct node_groups *node_groups)
{
	unsigned n, remaining_ncpus = 0;

	for (n = 0; n < nr_node_ids; n++) {
		node_groups[n].id = n;
		node_groups[n].ncpus = UINT_MAX;
	}

	for_each_node_mask(n, nodemsk) {
		unsigned ncpus;

		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
		ncpus = cpumask_weight(nmsk);

		if (!ncpus)
			continue;
		remaining_ncpus += ncpus;
		node_groups[n].ncpus = ncpus;
	}

	numgrps = min_t(unsigned, remaining_ncpus, numgrps);

	sort(node_groups, nr_node_ids, sizeof(node_groups[0]),
	     ncpus_cmp_func, NULL);
	/*
	 * Allocate groups for each node according to the ratio of this
	 * node's nr_cpus to the remaining un-assigned ncpus. 'numgrps' is
	 * bigger than the number of active numa nodes. Always start the
	 * allocation from the node with the smallest nr_cpus.
	 *
	 * This guarantees that each active node gets allocated at least
	 * one group: over-allocation can only happen when a node is
	 * assigned a single group, in which case the other nodes still
	 * get >= 1 group each, since 'numgrps' is bigger than the number
	 * of active numa nodes.
	 *
	 * A key invariant is that the number of groups allocated to each
	 * node is <= the CPU count of that node:
	 *
	 * 1) suppose there are two nodes: A and B
	 *	ncpu(X) is the CPU count of node X
	 *	grps(X) is the group count allocated to node X via this
	 *	algorithm
	 *
	 *	ncpu(A) + ncpu(B) = N
	 *	grps(A) + grps(B) = G
	 *
	 *	grps(A) = max(1, round_down(G * ncpu(A) / N))
	 *	grps(B) = G - grps(A)
	 *
	 *	both N and G are integers, and 2 <= G <= N, suppose
	 *	G = N - delta, and 0 <= delta <= N - 2
	 *
	 * 2) obviously grps(A) <= ncpu(A) because:
	 *
	 *	if grps(A) is 1, then grps(A) <= ncpu(A) given
	 *	ncpu(A) >= 1
	 *
	 *	otherwise,
	 *		grps(A) <= G * ncpu(A) / N <= ncpu(A), given G <= N
	 *
	 * 3) prove how grps(B) <= ncpu(B):
	 *
	 *	if round_down(G * ncpu(A) / N) == 0, grps(B) won't be
	 *	over-allocated, so grps(B) <= ncpu(B),
	 *
	 *	otherwise:
	 *		grps(A) =
	 *		round_down(G * ncpu(A) / N) =
	 *		round_down((N - delta) * ncpu(A) / N) =
	 *		round_down((N * ncpu(A) - delta * ncpu(A)) / N) >=
	 *		round_down((N * ncpu(A) - delta * N) / N) =
	 *		ncpu(A) - delta
	 *
	 *	then:
	 *		grps(A) - G >= ncpu(A) - delta - G
	 *		=>
	 *		G - grps(A) <= G + delta - ncpu(A)
	 *		=>
	 *		grps(B) <= N - ncpu(A)
	 *		=>
	 *		grps(B) <= ncpu(B)
	 *
	 * For three or more nodes, the argument applies inductively: treat
	 * the node currently being handled as A and the remaining nodes as
	 * one big node B, which is exactly how the algorithm proceeds,
	 * re-calculating 'remaining_ncpus' and 'numgrps' at each step, so
	 * that finally for each node X: grps(X) <= ncpu(X).
	 */
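	/*
	 * Worked example (illustrative numbers): node A has 2 active CPUs,
	 * node B has 6, and numgrps = 6, so N = 8 and G = 6. The loop below
	 * visits A first (smallest ncpus):
	 *
	 *	A: ngroups = max(1, 6 * 2 / 8) = 1  (<= ncpu(A) = 2),
	 *	   then remaining_ncpus = 6, numgrps = 5
	 *	B: ngroups = max(1, 5 * 6 / 6) = 5  (<= ncpu(B) = 6)
	 *
	 * All 6 groups are handed out and the invariant holds for both nodes.
	 */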
	for (n = 0; n < nr_node_ids; n++) {
		unsigned ngroups, ncpus;

		if (node_groups[n].ncpus == UINT_MAX)
			continue;

		WARN_ON_ONCE(numgrps == 0);

		ncpus = node_groups[n].ncpus;
		ngroups = max_t(unsigned, 1,
				numgrps * ncpus / remaining_ncpus);
		WARN_ON_ONCE(ngroups > ncpus);

		node_groups[n].ngroups = ngroups;

		remaining_ncpus -= ncpus;
		numgrps -= ngroups;
	}
}
static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
			       cpumask_var_t *node_to_cpumask,
			       const struct cpumask *cpu_mask,
			       struct cpumask *nmsk, struct cpumask *masks)
{
	unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0;
	unsigned int last_grp = numgrps;
	unsigned int curgrp = startgrp;
	nodemask_t nodemsk = NODE_MASK_NONE;
	struct node_groups *node_groups;

	if (cpumask_empty(cpu_mask))
		return 0;
	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);

	/*
	 * If the number of nodes in the mask is greater than or equal to the
	 * number of groups, just spread the groups across the nodes.
	 */
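	/*
	 * For example (illustrative numbers, assuming startgrp == 0): with
	 * numgrps = 2 and four nodes in the mask, the round-robin below puts
	 * the CPUs of the 1st and 3rd node into group 0 and those of the 2nd
	 * and 4th node into group 1.
	 */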
	if (numgrps <= nodes) {
		for_each_node_mask(n, nodemsk) {
			/* Ensure that only CPUs which are in both masks are set */
			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
			cpumask_or(&masks[curgrp], &masks[curgrp], nmsk);
			if (++curgrp == last_grp)
				curgrp = 0;
		}
		return numgrps;
	}
	node_groups = kcalloc(nr_node_ids,
			      sizeof(struct node_groups),
			      GFP_KERNEL);
	if (!node_groups)
		return -ENOMEM;

	/* allocate group number for each node */
	alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,
			   nodemsk, nmsk, node_groups);
	for (i = 0; i < nr_node_ids; i++) {
		unsigned int ncpus, v;
		struct node_groups *nv = &node_groups[i];

		if (nv->ngroups == UINT_MAX)
			continue;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
		ncpus = cpumask_weight(nmsk);
		if (!ncpus)
			continue;

		WARN_ON_ONCE(nv->ngroups > ncpus);

		/* Account for rounding errors */
		extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);
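		/*
		 * E.g. (illustrative numbers) ncpus = 10 and nv->ngroups = 4:
		 * cpus_per_grp starts at 10 / 4 = 2 and extra_grps = 2, so
		 * two of the four groups receive a third CPU below.
		 */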
		/* Spread allocated groups on CPUs of the current node */
		for (v = 0; v < nv->ngroups; v++, curgrp++) {
			cpus_per_grp = ncpus / nv->ngroups;

			/* Account for extra groups to compensate rounding errors */
			if (extra_grps) {
				cpus_per_grp++;
				--extra_grps;
			}

			/*
			 * wrapping has to be considered given 'startgrp'
			 * may start anywhere
			 */
			if (curgrp >= last_grp)
				curgrp = 0;
			grp_spread_init_one(&masks[curgrp], nmsk,
					    cpus_per_grp);
		}
		done += nv->ngroups;
	}
	kfree(node_groups);
	return done;
}
/*
 * Build the group masks in two stages: try to put CPUs that are close in
 * terms of CPU and NUMA locality into the same group, and run the grouping
 * in two passes:
 *
 *	1) spread present CPUs over the groups evenly first
 *	2) spread the remaining possible CPUs over the groups evenly
 */
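/*
 * Illustration (made-up numbers): with 8 present CPUs, 8 further
 * possible-but-not-present CPUs and numgrps = 4, the first pass spreads the
 * present CPUs over groups 0-3; since that already uses every group, the
 * second pass wraps around and spreads the non-present CPUs over groups 0-3
 * as well, so each group ends up with CPUs from both passes.
 */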
static struct cpumask *group_cpus_evenly(unsigned int numgrps)
{
	unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
	cpumask_var_t *node_to_cpumask;
	cpumask_var_t nmsk, npresmsk;
	int ret = -ENOMEM;
	struct cpumask *masks = NULL;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return NULL;

	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
		goto fail_nmsk;

	node_to_cpumask = alloc_node_to_cpumask();
	if (!node_to_cpumask)
		goto fail_npresmsk;

	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		goto fail_node_to_cpumask;
	/* Stabilize the cpumasks */
	cpus_read_lock();
	build_node_to_cpumask(node_to_cpumask);

	/* grouping present CPUs first */
	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
				  cpu_present_mask, nmsk, masks);
	if (ret < 0)
		goto fail_build_affinity;
	nr_present = ret;

	/*
	 * Allocate non-present CPUs starting from the next group to be
	 * handled. If the grouping of present CPUs already exhausted the
	 * group space, assign the non-present CPUs to the groups that were
	 * already allocated.
	 */
	if (nr_present >= numgrps)
		curgrp = 0;
	else
		curgrp = nr_present;
	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
				  npresmsk, nmsk, masks);
	if (ret >= 0)
		nr_others = ret;

 fail_build_affinity:
	cpus_read_unlock();

	if (ret >= 0)
		WARN_ON(nr_present + nr_others < numgrps);
 fail_node_to_cpumask:
	free_node_to_cpumask(node_to_cpumask);

 fail_npresmsk:
	free_cpumask_var(npresmsk);

 fail_nmsk:
	free_cpumask_var(nmsk);
	if (ret < 0) {
		kfree(masks);
		masks = NULL;
	}
	return masks;
}
static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
{
	affd->nr_sets = 1;
	affd->set_size[0] = affvecs;
}
/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
 */
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
	unsigned int affvecs, curvec, usedvecs, i;
	struct irq_affinity_desc *masks = NULL;

	/*
	 * Determine the number of vectors which need interrupt affinities
	 * assigned. If the pre/post request exhausts the available vectors
	 * then nothing to do here except for invoking the calc_sets()
	 * callback so the device driver can adjust to the situation.
	 */
	if (nvecs > affd->pre_vectors + affd->post_vectors)
		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
	else
		affvecs = 0;
	/*
	 * Simple invocations do not provide a calc_sets() callback. Install
	 * the generic one.
	 */
	if (!affd->calc_sets)
		affd->calc_sets = default_calc_sets;

	/* Recalculate the sets */
	affd->calc_sets(affd, affvecs);

	if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
		return NULL;

	/* Nothing to assign? */
	if (!affvecs)
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return NULL;
	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/*
	 * Spread on present CPUs starting from affd->pre_vectors. If we
	 * have multiple sets, build each set's affinity mask separately.
	 */
	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
		unsigned int this_vecs = affd->set_size[i];
		int j;
		struct cpumask *result = group_cpus_evenly(this_vecs);

		if (!result) {
			kfree(masks);
			return NULL;
		}

		for (j = 0; j < this_vecs; j++)
			cpumask_copy(&masks[curvec + j].mask, &result[j]);
		kfree(result);

		curvec += this_vecs;
		usedvecs += this_vecs;
	}
	/* Fill out vectors at the end that don't need affinity */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors + affvecs;
	else
		curvec = affd->pre_vectors + usedvecs;
	for (; curvec < nvecs; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/* Mark the managed interrupts */
	for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
		masks[i].is_managed = 1;

	return masks;
}
/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @minvec:	The minimum number of vectors available
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 */
unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd)
{
	unsigned int resv = affd->pre_vectors + affd->post_vectors;
	unsigned int set_vecs;

	if (resv > minvec)
		return 0;

	if (affd->calc_sets) {
		set_vecs = maxvec - resv;
	} else {
		cpus_read_lock();
		set_vecs = cpumask_weight(cpu_possible_mask);
		cpus_read_unlock();
	}

	return resv + min(set_vecs, maxvec - resv);
}
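/*
 * Illustrative sketch of how a driver-style caller might combine the two
 * interfaces above. The function name and the vector counts are made up for
 * this example and are not part of the kernel API.
 */
static void __maybe_unused irq_affinity_example(void)
{
	/* One pre vector (e.g. an admin queue) and no post vectors. */
	struct irq_affinity affd = {
		.pre_vectors	= 1,
	};
	unsigned int nvecs;
	struct irq_affinity_desc *masks;

	/* Clamp the vector count to what affinity spreading can use. */
	nvecs = irq_calc_affinity_vectors(2, 8, &affd);

	/* Build one spread affinity mask per vector. */
	masks = irq_create_affinity_masks(nvecs, &affd);
	if (!masks)
		return;

	/*
	 * masks[0] carries the default affinity for the pre vector; the
	 * remaining entries carry the managed, evenly spread affinities.
	 */
	kfree(masks);
}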