blk-mq: Build default queue map via group_cpus_evenly()
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 9c2fce1a7b50eee5e63cb65b3cfe312b4d21ffdc..0c612c19feb8b1ad9531f4fe984ffa7b6e9b6e6e 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#include <linux/group_cpus.h>
 
 #include <linux/blk-mq.h>
 #include "blk.h"
 #include "blk-mq.h"
 
-static int queue_index(struct blk_mq_queue_map *qmap,
-                      unsigned int nr_queues, const int q)
-{
-       return qmap->queue_offset + (q % nr_queues);
-}
-
-static int get_first_sibling(unsigned int cpu)
-{
-       unsigned int ret;
-
-       ret = cpumask_first(topology_sibling_cpumask(cpu));
-       if (ret < nr_cpu_ids)
-               return ret;
-
-       return cpu;
-}
-
 void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-       unsigned int *map = qmap->mq_map;
-       unsigned int nr_queues = qmap->nr_queues;
-       unsigned int cpu, first_sibling, q = 0;
-
-       for_each_possible_cpu(cpu)
-               map[cpu] = -1;
-
-       /*
-        * Spread queues among present CPUs first for minimizing
-        * count of dead queues which are mapped by all un-present CPUs
-        */
-       for_each_present_cpu(cpu) {
-               if (q >= nr_queues)
-                       break;
-               map[cpu] = queue_index(qmap, nr_queues, q++);
+       const struct cpumask *masks;
+       unsigned int queue, cpu;
+
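+       /*
+        * group_cpus_evenly() distributes all possible CPUs into
+        * nr_queues groups of roughly equal size, keeping CPUs that
+        * share topology (e.g. the same NUMA node) in the same group.
+        * It can fail on memory allocation; in that case fall back to
+        * pointing every CPU at the first queue of this map.
+        */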
+       masks = group_cpus_evenly(qmap->nr_queues);
+       if (!masks) {
+               for_each_possible_cpu(cpu)
+                       qmap->mq_map[cpu] = qmap->queue_offset;
+               return;
        }
 
-       for_each_possible_cpu(cpu) {
-               if (map[cpu] != -1)
-                       continue;
-               /*
-                * First do sequential mapping between CPUs and queues.
-                * In case we still have CPUs to map, and we have some number of
-                * threads per cores then map sibling threads to the same queue
-                * for performance optimizations.
-                */
-               if (q < nr_queues) {
-                       map[cpu] = queue_index(qmap, nr_queues, q++);
-               } else {
-                       first_sibling = get_first_sibling(cpu);
-                       if (first_sibling == cpu)
-                               map[cpu] = queue_index(qmap, nr_queues, q++);
-                       else
-                               map[cpu] = map[first_sibling];
-               }
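+       /* Every CPU in group 'queue' is served by hardware queue 'queue'. */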
+       for (queue = 0; queue < qmap->nr_queues; queue++) {
+               for_each_cpu(cpu, &masks[queue])
+                       qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
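+       /* group_cpus_evenly() allocated the masks; the caller must free them. */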
+       kfree(masks);
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
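
For illustration only (not part of the patch): a minimal user-space sketch of
the cpu -> queue map this produces when group_cpus_evenly() succeeds. It
splits CPUs into nr_queues contiguous, near-equal groups; the real helper
does the same but additionally respects NUMA and sibling topology, which this
toy ignores. toy_map_queues() and its parameters are hypothetical names.

#include <stdio.h>

/* Hypothetical stand-in for the kernel mapping; topology is ignored. */
static void toy_map_queues(unsigned int *map, unsigned int nr_cpus,
                           unsigned int nr_queues, unsigned int queue_offset)
{
        unsigned int cpu;

        /* Contiguous, near-equal groups: CPU c lands in group c*Q/N. */
        for (cpu = 0; cpu < nr_cpus; cpu++)
                map[cpu] = queue_offset + (cpu * nr_queues) / nr_cpus;
}

int main(void)
{
        unsigned int map[8], cpu;

        toy_map_queues(map, 8, 3, 0);
        for (cpu = 0; cpu < 8; cpu++)
                printf("cpu %u -> queue %u\n", cpu, map[cpu]);
        return 0;
}

With 8 CPUs and 3 queues this prints groups of size 3/3/2 (cpus 0-2 on queue
0, 3-5 on queue 1, 6-7 on queue 2), matching the even spread the patch aims
for, minus the topology awareness.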