arm64: Cleanup system cpucap handling
author     Mark Rutland <mark.rutland@arm.com>
           Tue, 12 Dec 2023 17:09:09 +0000
committer  Will Deacon <will@kernel.org>
           Wed, 13 Dec 2023 16:02:01 +0000

Recent changes to remove cpus_have_const_cap() introduced new users of
cpus_have_cap() in the period between detecting system cpucaps and
patching alternatives. It would be preferable to defer these checks until
after the relevant cpucaps have been patched so that they can use the
usual feature-check helper functions, which is clearer and reduces the
risk of accidentally running code that relies upon an alternative which
has not yet been patched.

This patch reworks the system-wide cpucap detection and patching to
minimize this transient period (the resulting flow is sketched after the
list below):

* The detection, enablement, and patching of system cpucaps is moved
  into a new setup_system_capabilities() function so that these can be
  grouped together more clearly, with no other functions called in the
  period between detection and patching. This is called from
  setup_system_features() before the subsequent checks that depend on
  the cpucaps.

  The logging of TTBR0 PAN and of cpucaps with a cpumask is also moved
  here to keep these as close as possible to update_cpu_capabilities().

  At the same time, comments are corrected and improved to make the
  intent clearer.

* As hyp_mode_check() only tests system register values (not hwcaps) and
  must be called prior to patching, the call to hyp_mode_check() is
  moved before the call to setup_system_features().

* In setup_system_features(), the use of system_uses_ttbr0_pan() is
  restored, now that this occurs after alternatives are patched. This is
  a partial revert of commit:

    53d62e995d9eaed1 ("arm64: Avoid cpus_have_const_cap() for ARM64_HAS_PAN")

* In sve_setup() and sme_setup(), the uses of system_supports_sve() and
  system_supports_sme() respectively are restored, now that these occur
  after alternatives are patched. This is a partial revert of commit:

    a76521d160284a1e ("arm64: Avoid cpus_have_const_cap() for ARM64_{SVE,SME,SME2,FA64}")
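
The resulting flow, condensed from the diff below for illustration only
(function bodies trimmed; comments paraphrase the description above):

    static void __init setup_system_capabilities(void)
    {
            /* Detect, enable, and patch with no other code in between. */
            update_cpu_capabilities(SCOPE_SYSTEM);
            enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
            apply_alternatives_all();

            /* ... log cpumask cpucaps and TTBR0 PAN ... */
    }

    void __init smp_cpus_done(unsigned int max_cpus)
    {
            /* ... */
            hyp_mode_check();               /* system register checks only */
            setup_system_features();        /* calls setup_system_capabilities() first */
            setup_user_features();
            /* ... */
    }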

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20231212170910.3745497-2-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/smp.c

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b335da126e86a966c11193ef97442da1a2f0ab8a..5f1320c38aa2509d519aa285c9fbd6bc8c062df5 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -3318,23 +3318,40 @@ unsigned long cpu_get_elf_hwcap2(void)
        return elf_hwcap[1];
 }
 
-void __init setup_system_features(void)
+static void __init setup_system_capabilities(void)
 {
-       int i;
        /*
-        * The system-wide safe feature feature register values have been
-        * finalized. Finalize and log the available system capabilities.
+        * The system-wide safe feature register values have been finalized.
+        * Detect, enable, and patch alternatives for the available system
+        * cpucaps.
         */
        update_cpu_capabilities(SCOPE_SYSTEM);
-       if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-           !cpus_have_cap(ARM64_HAS_PAN))
-               pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+       enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+       apply_alternatives_all();
 
        /*
-        * Enable all the available capabilities which have not been enabled
-        * already.
+        * Log any cpucaps with a cpumask as these aren't logged by
+        * update_cpu_capabilities().
         */
-       enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+       for (int i = 0; i < ARM64_NCAPS; i++) {
+               const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
+
+               if (caps && caps->cpus && caps->desc &&
+                       cpumask_any(caps->cpus) < nr_cpu_ids)
+                       pr_info("detected: %s on CPU%*pbl\n",
+                               caps->desc, cpumask_pr_args(caps->cpus));
+       }
+
+       /*
+        * TTBR0 PAN doesn't have its own cpucap, so log it manually.
+        */
+       if (system_uses_ttbr0_pan())
+               pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+}
+
+void __init setup_system_features(void)
+{
+       setup_system_capabilities();
 
        kpti_install_ng_mappings();
 
@@ -3347,15 +3364,6 @@ void __init setup_system_features(void)
        if (!cache_type_cwg())
                pr_warn("No Cache Writeback Granule information, assuming %d\n",
                        ARCH_DMA_MINALIGN);
-
-       for (i = 0; i < ARM64_NCAPS; i++) {
-               const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
-
-               if (caps && caps->cpus && caps->desc &&
-                       cpumask_any(caps->cpus) < nr_cpu_ids)
-                       pr_info("detected: %s on CPU%*pbl\n",
-                               caps->desc, cpumask_pr_args(caps->cpus));
-       }
 }
 
 void __init setup_user_features(void)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 1559c706d32d1dd0a69fa74457cc5b34a4937c99..bc9384517db3d840f211eeada7a7d6d82fb631f8 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1171,7 +1171,7 @@ void __init sve_setup(void)
        unsigned long b;
        int max_bit;
 
-       if (!cpus_have_cap(ARM64_SVE))
+       if (!system_supports_sve())
                return;
 
        /*
@@ -1301,7 +1301,7 @@ void __init sme_setup(void)
        struct vl_info *info = &vl_info[ARM64_VEC_SME];
        int min_bit, max_bit;
 
-       if (!cpus_have_cap(ARM64_SME))
+       if (!system_supports_sme())
                return;
 
        /*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index defbab84e9e5c7968cabe030e28ef261c9507fb7..85384dc9a89d6a1f98135ada31d7aae3d95bdbf4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -439,9 +439,8 @@ static void __init hyp_mode_check(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-       setup_system_features();
        hyp_mode_check();
-       apply_alternatives_all();
+       setup_system_features();
        setup_user_features();
        mark_linear_text_alias_ro();
 }