x86/cpu: Improve readability of per-CPU cpumask initialization code
author: Ingo Molnar <mingo@kernel.org>
Wed, 10 Apr 2024 04:59:47 +0000 (06:59 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Wed, 10 Apr 2024 05:02:33 +0000 (07:02 +0200)
In smp_prepare_cpus_common() and x2apic_prepare_cpu():

 - use 'cpu' instead of 'i'
 - use 'node' instead of 'n'
 - use vertical alignment to improve readability
 - better structure basic blocks
 - reduce col80 checkpatch damage

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: linux-kernel@vger.kernel.org
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/smpboot.c

index afbb885ce2904eb6745573ebdb5164749abe5f5b..7db83212effb0f36689bdb421ea8a102763a6f91 100644 (file)
@@ -178,14 +178,16 @@ static int x2apic_prepare_cpu(unsigned int cpu)
        u32 phys_apicid = apic->cpu_present_to_apicid(cpu);
        u32 cluster = apic_cluster(phys_apicid);
        u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf));
+       int node = cpu_to_node(cpu);
 
        x86_cpu_to_logical_apicid[cpu] = logical_apicid;
 
-       if (alloc_clustermask(cpu, cluster, cpu_to_node(cpu)) < 0)
+       if (alloc_clustermask(cpu, cluster, node) < 0)
                return -ENOMEM;
-       if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL,
-                               cpu_to_node(cpu)))
+
+       if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
                return -ENOMEM;
+
        return 0;
 }
 
index 536dad1440369d9e49790f331f23a931d43f8692..a58109583c476515a96156077b7181a3a20153d9 100644 (file)
@@ -1033,21 +1033,22 @@ static __init void disable_smp(void)
 
 void __init smp_prepare_cpus_common(void)
 {
-       unsigned int i, n;
+       unsigned int cpu, node;
 
        /* Mark all except the boot CPU as hotpluggable */
-       for_each_possible_cpu(i) {
-               if (i)
-                       per_cpu(cpu_info.cpu_index, i) = nr_cpu_ids;
+       for_each_possible_cpu(cpu) {
+               if (cpu)
+                       per_cpu(cpu_info.cpu_index, cpu) = nr_cpu_ids;
        }
 
-       for_each_possible_cpu(i) {
-               n = cpu_to_node(i);
-               zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, i), GFP_KERNEL, n);
-               zalloc_cpumask_var_node(&per_cpu(cpu_core_map, i), GFP_KERNEL, n);
-               zalloc_cpumask_var_node(&per_cpu(cpu_die_map, i), GFP_KERNEL, n);
-               zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL, n);
-               zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, i), GFP_KERNEL, n);
+       for_each_possible_cpu(cpu) {
+               node = cpu_to_node(cpu);
+
+               zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map,    cpu), GFP_KERNEL, node);
+               zalloc_cpumask_var_node(&per_cpu(cpu_core_map,       cpu), GFP_KERNEL, node);
+               zalloc_cpumask_var_node(&per_cpu(cpu_die_map,        cpu), GFP_KERNEL, node);
+               zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node);
+               zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node);
        }
 
        set_cpu_sibling_map(0);