arm64: spectre: Rename ARM64_HARDEN_EL2_VECTORS to ARM64_SPECTRE_V3A
author: Will Deacon <will@kernel.org>
Fri, 13 Nov 2020 11:38:45 +0000 (11:38 +0000)
committer: Marc Zyngier <maz@kernel.org>
Mon, 16 Nov 2020 10:43:06 +0000 (10:43 +0000)
Since ARM64_HARDEN_EL2_VECTORS is really a mitigation for Spectre-v3a,
rename it accordingly for consistency with the v2 and v4 mitigations.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20201113113847.21619-9-will@kernel.org
Documentation/arm64/memory.rst
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/spectre.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/proton-pack.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/va_layout.c

index cf03b3290800c25ea7c5d8cba093b0189e3d727a..75df7fb30a7ba8db38ff160c4786a64fff5fd833 100644 (file)
@@ -100,7 +100,7 @@ hypervisor maps kernel pages in EL2 at a fixed (and potentially
 random) offset from the linear mapping. See the kern_hyp_va macro and
 kvm_update_va_mask function for more details. MMIO devices such as
 GICv2 gets mapped next to the HYP idmap page, as do vectors when
-ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
+ARM64_SPECTRE_V3A is enabled for particular CPUs.
 
 When using KVM with the Virtualization Host Extensions, no additional
 mappings are created, since the host kernel runs directly in EL2.
index e7d98997c09c3058dd0656af26528ebd220cd2bc..162539d4c8cd0e5aac951ecbc5b65ba00f1ef644 100644 (file)
@@ -21,7 +21,7 @@
 #define ARM64_HAS_VIRT_HOST_EXTN               11
 #define ARM64_WORKAROUND_CAVIUM_27456          12
 #define ARM64_HAS_32BIT_EL0                    13
-#define ARM64_HARDEN_EL2_VECTORS               14
+#define ARM64_SPECTRE_V3A                      14
 #define ARM64_HAS_CNP                          15
 #define ARM64_HAS_NO_FPSIMD                    16
 #define ARM64_WORKAROUND_REPEAT_TLBI           17
index fa86b8f655b7b856db53f4ab62416aba01087327..b4df683ed8009143e803623808ff488e2bedb55c 100644 (file)
@@ -83,7 +83,7 @@ enum mitigation_state arm64_get_spectre_v2_state(void);
 bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
-void cpu_el2_vector_harden_enable(const struct arm64_cpu_capabilities *__unused);
+void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
 enum mitigation_state arm64_get_spectre_v4_state(void);
 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
index 7a040abaedeacc81c235ca10e5016095c9c60492..949d5615a47eb1c2030afa5bd5aa91a8f6e66ba8 100644 (file)
@@ -460,10 +460,10 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_RANDOMIZE_BASE
        {
        /* Must come after the Spectre-v2 entry */
-               .desc = "EL2 vector hardening",
-               .capability = ARM64_HARDEN_EL2_VECTORS,
+               .desc = "Spectre-v3a",
+               .capability = ARM64_SPECTRE_V3A,
                ERRATA_MIDR_RANGE_LIST(ca57_a72),
-               .cpu_enable = cpu_el2_vector_harden_enable,
+               .cpu_enable = spectre_v3a_enable_mitigation,
        },
 #endif
        {
index a4ba941297509789b3e79f8c9ec79e33b2bb9da5..cf9f8b885aea289e1b315ace30784aad6192677b 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
+ * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
  * detailed at:
  *
  *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
@@ -270,11 +270,18 @@ void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
        update_mitigation_state(&spectre_v2_state, state);
 }
 
-void cpu_el2_vector_harden_enable(const struct arm64_cpu_capabilities *__unused)
+/*
+ * Spectre-v3a.
+ *
+ * Phew, there's not an awful lot to do here! We just instruct EL2 to use
+ * an indirect trampoline for the hyp vectors so that guests can't read
+ * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
+ */
+void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
 {
        struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
 
-       if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS))
+       if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
                data->slot += HYP_VECTOR_INDIRECT;
 }
 
index 5e6fe5eef3ec4edd8bb628367c7cf3ae86a3363d..4cc29300f53376512ce9d86796e0ec640c6ad887 100644 (file)
@@ -1314,7 +1314,7 @@ static int kvm_init_vector_slots(void)
        base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
        kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
 
-       if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS))
+       if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
                return 0;
 
        if (!has_vhe()) {
@@ -1388,15 +1388,15 @@ static void cpu_hyp_reset(void)
  *   placed in one of the vector slots, which is executed before jumping
  *   to the real vectors.
  *
- * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
  *   containing the hardening sequence is mapped next to the idmap page,
  *   and executed before jumping to the real vectors.
  *
- * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
+ * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
  *   empty slot is selected, mapped next to the idmap page, and
  *   executed before jumping to the real vectors.
  *
- * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
+ * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
index d0a3660c72568f6dc644a1cf4b5a742e3af09856..e3249e2dda09e8da239a082acae7d7764c4767e8 100644 (file)
@@ -209,8 +209,7 @@ SYM_CODE_END(__kvm_hyp_vector)
        .if \indirect != 0
        alternative_cb  kvm_patch_vector_branch
        /*
-        * For ARM64_HARDEN_EL2_VECTORS configurations, these NOPs get replaced
-        * with:
+        * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
         *
         * movz x0, #(addr & 0xffff)
         * movk x0, #((addr >> 16) & 0xffff), lsl #16
index cc8e8756600fcf2f6b32a0609c43271310b74b47..02362fa27be469f48eb43a94d849bd52a2399f9a 100644 (file)
@@ -139,10 +139,8 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 
        BUG_ON(nr_inst != 4);
 
-       if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) ||
-           WARN_ON_ONCE(has_vhe())) {
+       if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
                return;
-       }
 
        /*
         * Compute HYP VA by using the same computation as kern_hyp_va()