/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}
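/*
 * A MIDR/REVIDR pair in entry->fixed_revs marks revisions where the
 * erratum has been fixed in silicon: e.g. the erratum 843419 entry below
 * uses MIDR_FIXED(0x4, BIT(8)) so that r0p4 parts with REVIDR bit 8 set
 * are treated as unaffected.
 */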
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU
	 *    anyway reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
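/*
 * Slot allocator for the EL2 hardening vectors: each slot is a 2K copy of
 * the vectors, handed out by install_bp_hardening_cb() below.
 */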
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;
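	/*
	 * A 2K slot holds 16 vector entries at 0x80-byte strides; stamp the
	 * hardening sequence into every entry of the slot.
	 */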
	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
#include <uapi/linux/psci.h>
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
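/*
 * Falkor's link-stack (return address) predictor is scrubbed by a burst
 * of nested branch-and-link instructions: the routine below saves x30,
 * executes 16 bl instructions to refill the predictor with benign
 * entries, then restores x30.
 */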
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);
/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};
static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
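		/*
		 * PSTATE.SSBS set means speculative store bypass is
		 * permitted, so the mitigation is ON when SSBS is 0 and
		 * OFF when SSBS is 1.
		 */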
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}
out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}
/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};
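/*
 * The list above is wired into the ARM64_SSBD capability entry below via
 * .midr_range_list, and consulted by has_ssbd_mitigation() before the
 * firmware answer is trusted.
 */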
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
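/*
 * Illustrative expansion of the helpers above (hypothetical capability
 * name, not part of this file): an entry such as
 *
 *	{
 *		.desc = "Example erratum",
 *		.capability = ARM64_WORKAROUND_EXAMPLE,
 *		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, 0, 0, 1, 2),
 *	}
 *
 * becomes a local-CPU erratum with .matches = is_affected_midr_range and
 * .midr_range covering Cortex-A57 r0p0 through r1p2.
 */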
/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{ /* sentinel */ },
};
/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}
#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI

static const struct midr_range arm64_repeat_tlbi_cpus[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
#endif
	{},
};

#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01]: ARM erratum 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
static const struct midr_range erratum_1188873_list[] = {
	/* Cortex-A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	/* Neoverse-N1 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
	{},
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
764 .desc = "Mismatched cache type (CTR_EL0)",
765 .capability = ARM64_MISMATCHED_CACHE_TYPE,
766 .matches = has_mismatched_cache_type,
767 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
768 .cpu_enable = cpu_enable_trap_ctr_access,
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
806 .desc = "Speculative Store Bypass Disable",
807 .capability = ARM64_SSBD,
808 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
809 .matches = has_ssbd_mitigation,
810 .midr_range_list = arm64_ssb_cpus,
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		.desc = "ARM erratum 1188873",
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1165522",
		.capability = ARM64_WORKAROUND_1165522,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
	{
	}
};
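/*
 * The sysfs hooks below back /sys/devices/system/cpu/vulnerabilities/
 * for spectre_v1, spectre_v2 and spec_store_bypass.
 */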
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	if (__spectrev2_safe)
		return sprintf(buf, "Not affected\n");

	if (__hardenbp_enab)
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");

	return sprintf(buf, "Vulnerable\n");
}
ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}