Drivers: hv: vmbus: Improve the CPU affiliation for channels
Author: K. Y. Srinivasan <kys@microsoft.com>
Wed, 5 Aug 2015 07:52:38 +0000 (00:52 -0700)
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 5 Aug 2015 18:41:31 +0000 (11:41 -0700)
The current code tracks the assigned CPUs within a NUMA node in the context of
the primary channel. So, if we have a VM with a single NUMA node with 8 VCPUs, we may
end up unevenly distributing the channel load. Fix the issue by tracking affiliations
globally.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/hv/channel_mgmt.c
drivers/hv/hv.c
drivers/hv/hyperv_vmbus.h
include/linux/hyperv.h

index 30613dfa38b31047622ac250e64d67e45ce2d0eb..39c5afc7970c1f525b077b8965508aa7e512f9b0 100644 (file)
@@ -392,6 +392,7 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
        struct vmbus_channel *primary = channel->primary_channel;
        int next_node;
        struct cpumask available_mask;
+       struct cpumask *alloced_mask;
 
        for (i = IDE; i < MAX_PERF_CHN; i++) {
                if (!memcmp(type_guid->b, hp_devs[i].guid,
@@ -409,7 +410,6 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
                 * channel, bind it to cpu 0.
                 */
                channel->numa_node = 0;
-               cpumask_set_cpu(0, &channel->alloced_cpus_in_node);
                channel->target_cpu = 0;
                channel->target_vp = hv_context.vp_index[0];
                return;
@@ -434,21 +434,22 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
                channel->numa_node = next_node;
                primary = channel;
        }
+       alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
 
-       if (cpumask_weight(&primary->alloced_cpus_in_node) ==
+       if (cpumask_weight(alloced_mask) ==
            cpumask_weight(cpumask_of_node(primary->numa_node))) {
                /*
                 * We have cycled through all the CPUs in the node;
                 * reset the alloced map.
                 */
-               cpumask_clear(&primary->alloced_cpus_in_node);
+               cpumask_clear(alloced_mask);
        }
 
-       cpumask_xor(&available_mask, &primary->alloced_cpus_in_node,
+       cpumask_xor(&available_mask, alloced_mask,
                    cpumask_of_node(primary->numa_node));
 
        cur_cpu = cpumask_next(-1, &available_mask);
-       cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
+       cpumask_set_cpu(cur_cpu, alloced_mask);
 
        channel->target_cpu = cur_cpu;
        channel->target_vp = hv_context.vp_index[cur_cpu];
index 41d8072d61d9d845e7245d43542bfa7d98a94514..fd93cfde96d0a9637fd180be38ede0f60dca3119 100644 (file)
@@ -332,6 +332,13 @@ int hv_synic_alloc(void)
        size_t ced_size = sizeof(struct clock_event_device);
        int cpu;
 
+       hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
+                                        GFP_ATOMIC);
+       if (hv_context.hv_numa_map == NULL) {
+               pr_err("Unable to allocate NUMA map\n");
+               goto err;
+       }
+
        for_each_online_cpu(cpu) {
                hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
                if (hv_context.event_dpc[cpu] == NULL) {
@@ -345,6 +352,7 @@ int hv_synic_alloc(void)
                        pr_err("Unable to allocate clock event device\n");
                        goto err;
                }
+
                hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
 
                hv_context.synic_message_page[cpu] =
@@ -393,6 +401,7 @@ void hv_synic_free(void)
 {
        int cpu;
 
+       kfree(hv_context.hv_numa_map);
        for_each_online_cpu(cpu)
                hv_synic_free_cpu(cpu);
 }
index 6383707016574bfdfed991eb6ef4c347284cd829..6f258255ac94116da8d6ecb72196d1f7d18ef64a 100644 (file)
@@ -551,6 +551,11 @@ struct hv_context {
         * Support PV clockevent device.
         */
        struct clock_event_device *clk_evt[NR_CPUS];
+       /*
+        * To manage allocations in a NUMA node.
+        * Array indexed by numa node ID.
+        */
+       struct cpumask *hv_numa_map;
 };
 
 extern struct hv_context hv_context;
index 54733d5b503e091e01825c8fd195d1173db92ef3..5a3df5a47c8f40a795e06ada1b230654a714bbce 100644 (file)
@@ -699,7 +699,6 @@ struct vmbus_channel {
        /*
         * State to manage the CPU affiliation of channels.
         */
-       struct cpumask alloced_cpus_in_node;
        int numa_node;
        /*
         * Support for sub-channels. For high performance devices,