genirq/affinity: Don't pass irq_affinity_desc array to irq_build_affinity_masks
author	Ming Lei <ming.lei@redhat.com>	Tue, 27 Dec 2022 02:29:02 +0000 (10:29 +0800)
committer	Thomas Gleixner <tglx@linutronix.de>	Tue, 17 Jan 2023 17:50:06 +0000 (18:50 +0100)
Prepare for abstracting irq_build_affinity_masks() into a public function
for assigning all CPUs evenly into several groups.

Don't pass the irq_affinity_desc array to irq_build_affinity_masks(); instead
return a cpumask array, storing each assigned group in one element of the
array.

This makes it possible to provide a generic interface for grouping all CPUs
evenly from a NUMA and CPU locality viewpoint. The cost is one extra
allocation in irq_build_affinity_masks(), which should be fine since the
allocation uses GFP_KERNEL and irq_build_affinity_masks() is a slow path
anyway.
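To illustrate, this is the resulting change in the calling convention,
condensed from the hunks below (error handling abbreviated):

	/* Before: the callee fills the caller-provided descriptor array */
	ret = irq_build_affinity_masks(this_vecs, &masks[curvec]);

	/* After: the callee allocates and returns a plain cpumask array;
	 * the caller copies each group into its descriptor and frees it
	 */
	struct cpumask *result = irq_build_affinity_masks(this_vecs);

	if (!result)
		return NULL;
	for (j = 0; j < this_vecs; j++)
		cpumask_copy(&masks[curvec + j].mask, &result[j]);
	kfree(result);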

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Link: https://lore.kernel.org/r/20221227022905.352674-4-ming.lei@redhat.com
kernel/irq/affinity.c

index da6379cd27fd4fe6dc5838ab91cb624c457d84a6..00bba1020ecb214a8cba0743a7bcc4daf8a9190e 100644
@@ -249,7 +249,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
                                      cpumask_var_t *node_to_cpumask,
                                      const struct cpumask *cpu_mask,
                                      struct cpumask *nmsk,
-                                     struct irq_affinity_desc *masks)
+                                     struct cpumask *masks)
 {
        unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;
        unsigned int last_affv = numvecs;
@@ -270,7 +270,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
                for_each_node_mask(n, nodemsk) {
                        /* Ensure that only CPUs which are in both masks are set */
                        cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
-                       cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
+                       cpumask_or(&masks[curvec], &masks[curvec], nmsk);
                        if (++curvec == last_affv)
                                curvec = 0;
                }
@@ -321,7 +321,7 @@ static int __irq_build_affinity_masks(unsigned int startvec,
                         */
                        if (curvec >= last_affv)
                                curvec = 0;
-                       irq_spread_init_one(&masks[curvec].mask, nmsk,
+                       irq_spread_init_one(&masks[curvec], nmsk,
                                                cpus_per_vec);
                }
                done += nv->nvectors;
@@ -335,16 +335,16 @@ static int __irq_build_affinity_masks(unsigned int startvec,
  *     1) spread present CPU on these vectors
  *     2) spread other possible CPUs on these vectors
  */
-static int irq_build_affinity_masks(unsigned int numvecs,
-                                   struct irq_affinity_desc *masks)
+static struct cpumask *irq_build_affinity_masks(unsigned int numvecs)
 {
        unsigned int curvec = 0, nr_present = 0, nr_others = 0;
        cpumask_var_t *node_to_cpumask;
        cpumask_var_t nmsk, npresmsk;
        int ret = -ENOMEM;
+       struct cpumask *masks = NULL;
 
        if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
-               return ret;
+               return NULL;
 
        if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
                goto fail_nmsk;
@@ -353,6 +353,10 @@ static int irq_build_affinity_masks(unsigned int numvecs,
        if (!node_to_cpumask)
                goto fail_npresmsk;
 
+       masks = kcalloc(numvecs, sizeof(*masks), GFP_KERNEL);
+       if (!masks)
+               goto fail_node_to_cpumask;
+
        /* Stabilize the cpumasks */
        cpus_read_lock();
        build_node_to_cpumask(node_to_cpumask);
@@ -386,6 +390,7 @@ static int irq_build_affinity_masks(unsigned int numvecs,
        if (ret >= 0)
                WARN_ON(nr_present + nr_others < numvecs);
 
+ fail_node_to_cpumask:
        free_node_to_cpumask(node_to_cpumask);
 
  fail_npresmsk:
@@ -393,7 +398,11 @@ static int irq_build_affinity_masks(unsigned int numvecs,
 
  fail_nmsk:
        free_cpumask_var(nmsk);
-       return ret < 0 ? ret : 0;
+       if (ret < 0) {
+               kfree(masks);
+               return NULL;
+       }
+       return masks;
 }
 
 static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
@@ -457,13 +466,18 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
         */
        for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
                unsigned int this_vecs = affd->set_size[i];
-               int ret;
+               int j;
+               struct cpumask *result = irq_build_affinity_masks(this_vecs);
 
-               ret = irq_build_affinity_masks(this_vecs, &masks[curvec]);
-               if (ret) {
+               if (!result) {
                        kfree(masks);
                        return NULL;
                }
+
+               for (j = 0; j < this_vecs; j++)
+                       cpumask_copy(&masks[curvec + j].mask, &result[j]);
+               kfree(result);
+
                curvec += this_vecs;
                usedvecs += this_vecs;
        }