Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
e116a375 AP |
2 | /* |
3 | * Contains CPU specific errata definitions | |
4 | * | |
5 | * Copyright (C) 2014 ARM Ltd. | |
e116a375 AP |
6 | */ |
7 | ||
94a5d879 | 8 | #include <linux/arm-smccc.h> |
e116a375 | 9 | #include <linux/types.h> |
a111b7c0 | 10 | #include <linux/cpu.h> |
e116a375 AP |
11 | #include <asm/cpu.h> |
12 | #include <asm/cputype.h> | |
13 | #include <asm/cpufeature.h> | |
4db61fef | 14 | #include <asm/kvm_asm.h> |
93916beb | 15 | #include <asm/smp_plat.h> |
e116a375 | 16 | |
/*
 * Match a CPU against the capability's MIDR range, then exclude revisions
 * whose REVIDR bits indicate the erratum is already fixed in hardware.
 */
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	/* MIDR/REVIDR are per-CPU registers: must run pinned on the CPU */
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	/* Keep only variant+revision for exact comparison with fixed_revs */
	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	/* A set revidr_mask bit on a matching revision means "not affected" */
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}
35 | ||
/* Match if this CPU's MIDR is in the capability's terminated range list */
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	/* Per-CPU register read: must not migrate while checking */
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
43 | ||
/*
 * Qualcomm Kryo matcher: compare only implementer, architecture and the
 * top nibble of the part number, ignoring variant/revision, since Kryo
 * parts encode those fields in a non-standard way.
 */
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	/* 0xf00 << PARTNUM_SHIFT keeps just the part number's top nibble */
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
57 | ||
/*
 * Erratum matcher: true when both the raw and the effective CTR_EL0 of
 * this CPU differ from the system-wide safe value in the strict fields.
 */
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behaves
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
89 | ||
/*
 * Enable trapping of EL0 CTR_EL0 reads (clear SCTLR_EL1.UCT) on CPUs whose
 * raw CTR_EL0 mismatches the system value, or when an erratum demands it.
 */
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
108 | ||
/* Next free slot in the EL2 hardened vector page (shared allocator) */
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/* Per-CPU branch-predictor hardening callback plus its hyp vector slot */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * Replicate the vector sequence into every 0x80-byte vector entry of the
 * given 2K hyp vector slot, then make the copy visible to instruction
 * fetch.
 */
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

/*
 * Install the hardening callback for this CPU. A hyp vector slot is
 * allocated lazily and shared: if another CPU already registered the same
 * callback, its slot is reused instead of burning a new one.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	/* Reuse an existing slot if any CPU already uses this callback */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
/* Without KVM indirect vectors only the per-CPU callback is recorded */
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
0f15adbb | 171 | |
#include <linux/arm-smccc.h>

/* Ask firmware to run the Spectre-v2 workaround via SMC (bare metal) */
static void __maybe_unused call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Ask firmware to run the Spectre-v2 workaround via HVC (as a guest) */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/*
 * Falkor alternative: stuff the return-address predictor with 16 benign
 * entries (bl to the next instruction) and restore the link register.
 */
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
195 | ||
/* "nospectre_v2" early parameter: opt out of Spectre-v2 mitigation */
static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);
203 | ||
/*
 * Probe firmware for ARCH_WORKAROUND_1 (Spectre-v2 BP hardening) and,
 * when present, install the matching per-CPU callback.
 *
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	switch ((int)res.a0) {
	case 1:
		/* Firmware says we're just fine */
		return 0;
	case 0:
		break;
	default:
		/* Negative/unknown answer: no usable workaround */
		return -1;
	}

	/* Pick the callback (and KVM vector source) matching the conduit */
	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
#if IS_ENABLED(CONFIG_KVM)
		smccc_start = __smccc_workaround_1_smc;
		smccc_end = __smccc_workaround_1_smc +
			__SMCCC_WORKAROUND_1_SMC_SZ;
#else
		smccc_start = NULL;
		smccc_end = NULL;
#endif
		break;

	default:
		return -1;
	}

	/* Falkor parts use link-stack sanitization instead of firmware */
	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}
0f15adbb | 262 | |
/* Non-zero when this CPU must make the SSBD firmware call on entry/exit */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* System-wide SSBD policy; default is dynamic, "ssbd=" may override it */
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
/* Stays true only while every CPU seen so far is known invulnerable */
static bool __ssb_safe = true;

/* Accepted "ssbd=" command-line values and the states they select */
static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};
276 | ||
277 | static int __init ssbd_cfg(char *buf) | |
278 | { | |
279 | int i; | |
280 | ||
281 | if (!buf || !buf[0]) | |
282 | return -EINVAL; | |
283 | ||
284 | for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { | |
285 | int len = strlen(ssbd_options[i].str); | |
286 | ||
287 | if (strncmp(buf, ssbd_options[i].str, len)) | |
288 | continue; | |
289 | ||
290 | ssbd_state = ssbd_options[i].state; | |
291 | return 0; | |
292 | } | |
293 | ||
294 | return -EINVAL; | |
295 | } | |
296 | early_param("ssbd", ssbd_cfg); | |
297 | ||
/*
 * Alternatives callback: rewrite the single patched instruction to the
 * SMCCC conduit (HVC or SMC) the firmware advertises. If no conduit is
 * available the original instruction is left untouched.
 */
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	/* This callback only knows how to patch exactly one instruction */
	BUG_ON(nr_inst != 1);

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
a725e3dd | 319 | |
/*
 * Alternatives callback: NOP out the branch that skips ARCH_WORKAROUND_2
 * handling when the SSBD state is dynamically toggleable (KERNEL mode).
 */
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
333 | ||
/*
 * Turn the SSBD mitigation on or off on this CPU: prefer the SSBS
 * PSTATE bit when the CPU has it, otherwise make the ARCH_WORKAROUND_2
 * firmware call.
 */
void arm64_set_ssbd_mitigation(bool state)
{
	int conduit;

	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	/* SSBS is inverted: PSTATE.SSBS == 0 enables the mitigation */
	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
				       NULL);

	/* The capability should never be set without a working conduit */
	WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
}
356 | ||
/*
 * Per-CPU matcher for the SSBD capability. Queries SSBS and the
 * ARCH_WORKAROUND_2 firmware interface to decide whether this CPU needs
 * the mitigation, applies the "ssbd=" policy, and updates the global
 * __ssb_safe / ssbd_state tracking as a side effect.
 */
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;
	int conduit;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	/* SSBS CPUs handle SSBD themselves; no firmware call needed */
	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				       ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	if (conduit == SMCCC_CONDUIT_NONE) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		/* Firmware supports the call, so this CPU is vulnerable */
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	/* Apply the command-line (or default) policy to this CPU */
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}
8e290624 | 459 | |
/* known invulnerable cores (checked before trusting firmware's answer) */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
	{},
};
470 | ||
#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Non-zero while the erratum 1463225 syscall-entry workaround is active */
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

/*
 * Matches affected MIDRs, but only when the kernel itself runs in hyp
 * mode (see the is_kernel_in_hyp_mode() check).
 */
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
481 | ||
/* Trap EL0 cache-maintenance instructions by clearing SCTLR_EL1.UCI */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
487 | ||
/* Initialiser fragments for MIDR-matched capability entries */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* One-entry fixed_revs table: REVIDR-flagged revisions are not affected */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given a model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
524 | ||
/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;	/* cleared if any CPU lacks hardening */
static bool __spectrev2_safe = true;	/* cleared once an affected CPU boots */
528 | ||
c118bbb5 AP |
529 | int get_spectre_v2_workaround_state(void) |
530 | { | |
531 | if (__spectrev2_safe) | |
532 | return ARM64_BP_HARDEN_NOT_REQUIRED; | |
533 | ||
534 | if (!__hardenbp_enab) | |
535 | return ARM64_BP_HARDEN_UNKNOWN; | |
536 | ||
537 | return ARM64_BP_HARDEN_WA_NEEDED; | |
538 | } | |
539 | ||
/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 * Checked before falling back to the firmware query.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
	{ /* sentinel */ }
};
553 | ||
/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 *
 * Returns true when this CPU needs (and got) the firmware workaround;
 * also updates __spectrev2_safe/__hardenbp_enab for status reporting.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	/* From here on the CPU is known affected */
	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	/* need_wa < 0: affected, but firmware offers no workaround */
	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}
06f1494f | 601 | |
/*
 * cpu_enable hook: re-run the matcher on this CPU, whose firmware
 * detection path installs the per-CPU hardening callback as a side
 * effect (see check_branch_predictor()/detect_harden_bp_fw()).
 */
static void
cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
{
	cap->matches(cap, SCOPE_LOCAL_CPU);
}
607 | ||
/* ThunderX2-class parts: Broadcom Vulcan and Cavium ThunderX2 */
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};
613 | ||
614 | static bool __maybe_unused | |
615 | needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, | |
616 | int scope) | |
617 | { | |
618 | int i; | |
619 | ||
620 | if (!is_affected_midr_range_list(entry, scope) || | |
621 | !is_hyp_mode_available()) | |
622 | return false; | |
623 | ||
624 | for_each_possible_cpu(i) { | |
625 | if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0) | |
626 | return true; | |
627 | } | |
628 | ||
629 | return false; | |
630 | } | |
631 | ||
/*
 * Neoverse-N1 erratum 1542419: matches any N1 revision, but only when
 * the CPU reports CTR_EL0.DIC (no I-cache invalidation needed claim).
 */
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}
8892b718 | 643 | |
#ifdef CONFIG_RANDOMIZE_BASE

/* A57/A72: used for ARM64_HARDEN_EL2_VECTORS when KASLR is enabled */
static const struct midr_range ca57_a72[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/* CPUs needing repeated TLBI: Falkor/Kryo e1009 and A76 e1286807 */
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
/* Falkor v1 by revision, other Kryo parts via the custom matcher */
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* r0p4 parts with REVIDR bit 8 set have the fix */
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
/* Errata 1165522, 1319367 and 1530923 share the speculative-AT fix */
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
797 | ||
c9460dcb SP |
798 | const struct arm64_cpu_capabilities arm64_errata[] = { |
799 | #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE | |
c0a01b84 | 800 | { |
357dd8a2 | 801 | .desc = "ARM errata 826319, 827319, 824069, or 819472", |
c0a01b84 | 802 | .capability = ARM64_WORKAROUND_CLEAN_CACHE, |
c9460dcb | 803 | ERRATA_MIDR_RANGE_LIST(workaround_clean_cache), |
c0cda3b8 | 804 | .cpu_enable = cpu_enable_cache_maint_trap, |
c0a01b84 AP |
805 | }, |
806 | #endif | |
807 | #ifdef CONFIG_ARM64_ERRATUM_832075 | |
301bcfac | 808 | { |
5afaa1fc AP |
809 | /* Cortex-A57 r0p0 - r1p2 */ |
810 | .desc = "ARM erratum 832075", | |
811 | .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, | |
5e7951ce SP |
812 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, |
813 | 0, 0, | |
814 | 1, 2), | |
5afaa1fc | 815 | }, |
905e8c5d | 816 | #endif |
498cd5c3 MZ |
817 | #ifdef CONFIG_ARM64_ERRATUM_834220 |
818 | { | |
819 | /* Cortex-A57 r0p0 - r1p2 */ | |
820 | .desc = "ARM erratum 834220", | |
821 | .capability = ARM64_WORKAROUND_834220, | |
5e7951ce SP |
822 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, |
823 | 0, 0, | |
824 | 1, 2), | |
498cd5c3 MZ |
825 | }, |
826 | #endif | |
ca79acca AB |
827 | #ifdef CONFIG_ARM64_ERRATUM_843419 |
828 | { | |
ca79acca AB |
829 | .desc = "ARM erratum 843419", |
830 | .capability = ARM64_WORKAROUND_843419, | |
1cf45b8f FF |
831 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
832 | .matches = cpucap_multi_entry_cap_matches, | |
833 | .match_list = erratum_843419_list, | |
498cd5c3 MZ |
834 | }, |
835 | #endif | |
905e8c5d WD |
836 | #ifdef CONFIG_ARM64_ERRATUM_845719 |
837 | { | |
905e8c5d WD |
838 | .desc = "ARM erratum 845719", |
839 | .capability = ARM64_WORKAROUND_845719, | |
bfc97f9f | 840 | ERRATA_MIDR_RANGE_LIST(erratum_845719_list), |
905e8c5d | 841 | }, |
6d4e11c5 RR |
842 | #endif |
843 | #ifdef CONFIG_CAVIUM_ERRATUM_23154 | |
844 | { | |
845 | /* Cavium ThunderX, pass 1.x */ | |
846 | .desc = "Cavium erratum 23154", | |
847 | .capability = ARM64_WORKAROUND_CAVIUM_23154, | |
5e7951ce | 848 | ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1), |
6d4e11c5 | 849 | }, |
104a0c02 AP |
850 | #endif |
851 | #ifdef CONFIG_CAVIUM_ERRATUM_27456 | |
852 | { | |
47c459be GK |
853 | .desc = "Cavium erratum 27456", |
854 | .capability = ARM64_WORKAROUND_CAVIUM_27456, | |
f58cdf7e | 855 | ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus), |
47c459be | 856 | }, |
690a3415 DD |
857 | #endif |
858 | #ifdef CONFIG_CAVIUM_ERRATUM_30115 | |
859 | { | |
690a3415 DD |
860 | .desc = "Cavium erratum 30115", |
861 | .capability = ARM64_WORKAROUND_CAVIUM_30115, | |
f58cdf7e | 862 | ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus), |
690a3415 | 863 | }, |
c0a01b84 | 864 | #endif |
116c81f4 | 865 | { |
880f7cc4 | 866 | .desc = "Mismatched cache type (CTR_EL0)", |
314d53d2 SP |
867 | .capability = ARM64_MISMATCHED_CACHE_TYPE, |
868 | .matches = has_mismatched_cache_type, | |
5b4747c5 | 869 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
c0cda3b8 | 870 | .cpu_enable = cpu_enable_trap_ctr_access, |
116c81f4 | 871 | }, |
38fd94b0 CC |
872 | #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 |
873 | { | |
a3dcea2c | 874 | .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003", |
bb487118 | 875 | .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, |
d4af3c4b | 876 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
1e013d06 | 877 | .matches = cpucap_multi_entry_cap_matches, |
a3dcea2c | 878 | .match_list = qcom_erratum_1003_list, |
bb487118 | 879 | }, |
38fd94b0 | 880 | #endif |
ce8c80c5 | 881 | #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI |
d9ff80f8 | 882 | { |
357dd8a2 | 883 | .desc = "Qualcomm erratum 1009, or ARM erratum 1286807", |
d9ff80f8 | 884 | .capability = ARM64_WORKAROUND_REPEAT_TLBI, |
36c602dc BA |
885 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
886 | .matches = cpucap_multi_entry_cap_matches, | |
887 | .match_list = arm64_repeat_tlbi_list, | |
d9ff80f8 | 888 | }, |
eeb1efbc MZ |
889 | #endif |
890 | #ifdef CONFIG_ARM64_ERRATUM_858921 | |
891 | { | |
892 | /* Cortex-A73 all versions */ | |
893 | .desc = "ARM erratum 858921", | |
894 | .capability = ARM64_WORKAROUND_858921, | |
5e7951ce | 895 | ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
eeb1efbc | 896 | }, |
aa6acde6 | 897 | #endif |
aa6acde6 | 898 | { |
18fce561 | 899 | .desc = "Branch predictor hardening", |
aa6acde6 | 900 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
73f38166 MZ |
901 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
902 | .matches = check_branch_predictor, | |
18fce561 | 903 | .cpu_enable = cpu_enable_branch_predictor_hardening, |
f3d795d9 | 904 | }, |
a59a2edb | 905 | #ifdef CONFIG_RANDOMIZE_BASE |
4b472ffd | 906 | { |
8892b718 | 907 | .desc = "EL2 vector hardening", |
4b472ffd | 908 | .capability = ARM64_HARDEN_EL2_VECTORS, |
f75e2294 | 909 | ERRATA_MIDR_RANGE_LIST(ca57_a72), |
4b472ffd | 910 | }, |
a725e3dd | 911 | #endif |
a725e3dd MZ |
912 | { |
913 | .desc = "Speculative Store Bypass Disable", | |
914 | .capability = ARM64_SSBD, | |
915 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
916 | .matches = has_ssbd_mitigation, | |
526e065d | 917 | .midr_range_list = arm64_ssb_cpus, |
a725e3dd | 918 | }, |
a5325089 | 919 | #ifdef CONFIG_ARM64_ERRATUM_1418040 |
95b861a4 | 920 | { |
a5325089 MZ |
921 | .desc = "ARM erratum 1418040", |
922 | .capability = ARM64_WORKAROUND_1418040, | |
923 | ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), | |
bf87bb08 MZ |
924 | .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU | |
925 | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU), | |
95b861a4 | 926 | }, |
8b2cca9a | 927 | #endif |
02ab1f50 | 928 | #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT |
8b2cca9a | 929 | { |
c350717e | 930 | .desc = "ARM errata 1165522, 1319367, or 1530923", |
02ab1f50 AS |
931 | .capability = ARM64_WORKAROUND_SPECULATIVE_AT, |
932 | ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list), | |
8b2cca9a | 933 | }, |
969f5ea6 WD |
934 | #endif |
935 | #ifdef CONFIG_ARM64_ERRATUM_1463225 | |
936 | { | |
937 | .desc = "ARM erratum 1463225", | |
938 | .capability = ARM64_WORKAROUND_1463225, | |
939 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
940 | .matches = has_cortex_a76_erratum_1463225, | |
a9e821b8 | 941 | .midr_range_list = erratum_1463225, |
969f5ea6 | 942 | }, |
93916beb MZ |
943 | #endif |
944 | #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219 | |
945 | { | |
946 | .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)", | |
947 | .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM, | |
948 | ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), | |
949 | .matches = needs_tx2_tvm_workaround, | |
950 | }, | |
9405447e MZ |
951 | { |
952 | .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)", | |
953 | .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM, | |
954 | ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), | |
955 | }, | |
6a036afb | 956 | #endif |
05460849 JM |
957 | #ifdef CONFIG_ARM64_ERRATUM_1542419 |
958 | { | |
959 | /* we depend on the firmware portion for correctness */ | |
960 | .desc = "ARM erratum 1542419 (kernel portion)", | |
961 | .capability = ARM64_WORKAROUND_1542419, | |
962 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
963 | .matches = has_neoverse_n1_erratum_1542419, | |
964 | .cpu_enable = cpu_enable_trap_ctr_access, | |
965 | }, | |
d9ff80f8 | 966 | #endif |
5afaa1fc | 967 | { |
301bcfac | 968 | } |
e116a375 | 969 | }; |
3891ebcc MYK |
970 | |
/*
 * sysfs show handler for /sys/devices/system/cpu/vulnerabilities/spectre_v1.
 *
 * The reported state is a constant: the visible arm64 code always claims
 * __user pointer sanitization, independent of CPU model.  Returns the
 * number of bytes written into @buf.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	static const char msg[] = "Mitigation: __user pointer sanitization\n";

	return sprintf(buf, "%s", msg);
}
d2532e27 JL |
976 | |
977 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, | |
978 | char *buf) | |
979 | { | |
c118bbb5 AP |
980 | switch (get_spectre_v2_workaround_state()) { |
981 | case ARM64_BP_HARDEN_NOT_REQUIRED: | |
d2532e27 | 982 | return sprintf(buf, "Not affected\n"); |
c118bbb5 | 983 | case ARM64_BP_HARDEN_WA_NEEDED: |
d2532e27 | 984 | return sprintf(buf, "Mitigation: Branch predictor hardening\n"); |
c118bbb5 AP |
985 | case ARM64_BP_HARDEN_UNKNOWN: |
986 | default: | |
987 | return sprintf(buf, "Vulnerable\n"); | |
988 | } | |
d2532e27 | 989 | } |
526e065d JL |
990 | |
991 | ssize_t cpu_show_spec_store_bypass(struct device *dev, | |
992 | struct device_attribute *attr, char *buf) | |
993 | { | |
994 | if (__ssb_safe) | |
995 | return sprintf(buf, "Not affected\n"); | |
996 | ||
997 | switch (ssbd_state) { | |
998 | case ARM64_SSBD_KERNEL: | |
999 | case ARM64_SSBD_FORCE_ENABLE: | |
1000 | if (IS_ENABLED(CONFIG_ARM64_SSBD)) | |
1001 | return sprintf(buf, | |
1002 | "Mitigation: Speculative Store Bypass disabled via prctl\n"); | |
1003 | } | |
1004 | ||
1005 | return sprintf(buf, "Vulnerable\n"); | |
1006 | } |