Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
e116a375 AP |
2 | /* |
3 | * Contains CPU specific errata definitions | |
4 | * | |
5 | * Copyright (C) 2014 ARM Ltd. | |
e116a375 AP |
6 | */ |
7 | ||
94a5d879 | 8 | #include <linux/arm-smccc.h> |
e116a375 | 9 | #include <linux/types.h> |
a111b7c0 | 10 | #include <linux/cpu.h> |
e116a375 AP |
11 | #include <asm/cpu.h> |
12 | #include <asm/cputype.h> | |
13 | #include <asm/cpufeature.h> | |
4db61fef | 14 | #include <asm/kvm_asm.h> |
93916beb | 15 | #include <asm/smp_plat.h> |
e116a375 | 16 | |
/*
 * Match helper for capability entries keyed on a single MIDR range.
 *
 * Returns true when this CPU's MIDR lies inside entry->midr_range and the
 * part is NOT one of the entry's fixed_revs — (MIDR variant/revision,
 * REVIDR bit-mask) pairs naming revisions in which the erratum was fixed.
 *
 * Must be called on the CPU in question with preemption disabled
 * (SCOPE_LOCAL_CPU), as it reads this CPU's ID registers directly.
 */
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	/* Reduce MIDR to variant+revision and check the "fixed in" list */
	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}
35 | ||
be5b2998 SP |
36 | static bool __maybe_unused |
37 | is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry, | |
38 | int scope) | |
301bcfac | 39 | { |
92406f0c | 40 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
be5b2998 | 41 | return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list); |
301bcfac AP |
42 | } |
43 | ||
bb487118 SB |
44 | static bool __maybe_unused |
45 | is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) | |
46 | { | |
47 | u32 model; | |
48 | ||
49 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | |
50 | ||
51 | model = read_cpuid_id(); | |
52 | model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | | |
53 | MIDR_ARCHITECTURE_MASK; | |
54 | ||
1df31050 | 55 | return model == entry->midr_range.model; |
bb487118 SB |
56 | } |
57 | ||
/*
 * Detect a CPU whose CTR_EL0 disagrees with the system-wide sanitised
 * copy (arm64_ftr_reg_ctrel0), considering only the strictly-checked
 * fields. Used to decide whether EL0 CTR_EL0 reads must be trapped.
 */
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behaves
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	/* Mismatch only if BOTH the raw and effective values differ */
	return (ctr_real != sys) && (ctr_raw != sys);
}
89 | ||
c0cda3b8 | 90 | static void |
05460849 | 91 | cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) |
116c81f4 | 92 | { |
4afe8e79 | 93 | u64 mask = arm64_ftr_reg_ctrel0.strict_mask; |
05460849 | 94 | bool enable_uct_trap = false; |
4afe8e79 SP |
95 | |
96 | /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */ | |
97 | if ((read_cpuid_cachetype() & mask) != | |
98 | (arm64_ftr_reg_ctrel0.sys_val & mask)) | |
05460849 JM |
99 | enable_uct_trap = true; |
100 | ||
101 | /* ... or if the system is affected by an erratum */ | |
102 | if (cap->capability == ARM64_WORKAROUND_1542419) | |
103 | enable_uct_trap = true; | |
104 | ||
105 | if (enable_uct_trap) | |
4afe8e79 | 106 | sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); |
116c81f4 SP |
107 | } |
108 | ||
/* Last EL2 hardened-vector slot handed out; -1 means none allocated yet */
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/* Per-CPU branch-predictor hardening state (callback + hyp vector slot) */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
115 | ||
e8b22d0f | 116 | #ifdef CONFIG_KVM_INDIRECT_VECTORS |
/*
 * Copy the hardening sequence into every 0x80-byte vector entry of the
 * given 2K hyp-vector slot, then make the copies visible to instruction
 * fetch. The slot lives inside __bp_harden_hyp_vecs; the lm_alias() is
 * needed because that symbol is mapped read-only at its link address.
 */
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	/* 0x80 bytes = one exception-vector entry; fill all 16 of them */
	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
128 | ||
/*
 * Register a branch-predictor invalidation callback for this CPU and,
 * when running at EL2 host level, install the matching hardened hyp
 * vector sequence. Slots are shared: if another CPU already installed
 * the same callback, its vector slot is reused rather than allocating
 * a new one.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	/* Reuse an existing slot if any CPU already uses this callback */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
163 | #else | |
/* !CONFIG_KVM_INDIRECT_VECTORS: no hyp vectors to patch, just set the cb */
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
e8b22d0f | 170 | #endif /* CONFIG_KVM_INDIRECT_VECTORS */ |
0f15adbb | 171 | |
b092201e | 172 | #include <linux/arm-smccc.h> |
aa6acde6 | 173 | |
/* Invalidate the branch predictor via the SMC firmware conduit */
static void __maybe_unused call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
178 | ||
/* Invalidate the branch predictor via the HVC conduit (we are a guest) */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
183 | ||
/*
 * Spectre-v2 mitigation for Qualcomm Falkor: overwrite the CPU's return
 * stack with 16 dummy entries ("bl . + 4" branches to the next insn,
 * pushing a predictable return address each time), then restore x30.
 */
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
195 | ||
/* Set by the "nospectre_v2" kernel parameter to disable the mitigation */
static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	/* Flag-style parameter: presence alone disables the mitigation */
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);
203 | ||
/*
 * Probe firmware for ARM_SMCCC_ARCH_WORKAROUND_1 (Spectre-v2 BP
 * invalidation) and install the appropriate per-CPU callback.
 *
 * Return value:
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	switch ((int)res.a0) {
	case 1:
		/* Firmware says we're just fine */
		return 0;
	case 0:
		/* Workaround available; pick a conduit below */
		break;
	default:
		return -1;
	}

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

#if IS_ENABLED(CONFIG_KVM)
	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc;
		smccc_end = __smccc_workaround_1_smc +
			__SMCCC_WORKAROUND_1_SMC_SZ;
		break;
#endif

	default:
		return -1;
	}

	/* Falkor parts use the link-stack trick instead of the SMCCC call */
	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}
0f15adbb | 259 | |
/* Set when this CPU must issue the WORKAROUND_2 call on kernel entry/exit */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* Global SSBD policy; may be overridden by the "ssbd=" parameter below */
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
/* Cleared as soon as any booted CPU turns out to need the mitigation */
static bool __ssb_safe = true;

/* Accepted values for the "ssbd=" kernel parameter */
static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};
273 | ||
274 | static int __init ssbd_cfg(char *buf) | |
275 | { | |
276 | int i; | |
277 | ||
278 | if (!buf || !buf[0]) | |
279 | return -EINVAL; | |
280 | ||
281 | for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { | |
282 | int len = strlen(ssbd_options[i].str); | |
283 | ||
284 | if (strncmp(buf, ssbd_options[i].str, len)) | |
285 | continue; | |
286 | ||
287 | ssbd_state = ssbd_options[i].state; | |
288 | return 0; | |
289 | } | |
290 | ||
291 | return -EINVAL; | |
292 | } | |
293 | early_param("ssbd", ssbd_cfg); | |
294 | ||
/*
 * Alternatives callback: patch the single placeholder instruction with
 * either an HVC or an SMC, depending on the detected SMCCC conduit.
 * If no conduit is available the original instruction is left in place.
 */
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		/* No conduit: keep the original instruction untouched */
		return;
	}

	*updptr = cpu_to_le32(insn);
}
a725e3dd | 316 | |
/*
 * Alternatives callback: NOP out the branch that skips the dynamic
 * WORKAROUND_2 handling, but only when the SSBD state is runtime-
 * toggleable (ARM64_SSBD_KERNEL).
 */
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
330 | ||
/*
 * Turn the Speculative Store Bypass mitigation on or off for this CPU.
 * Prefers the architected PSTATE.SSBS control when the CPU has it;
 * otherwise falls back to the firmware ARCH_WORKAROUND_2 call.
 */
void arm64_set_ssbd_mitigation(bool state)
{
	int conduit;

	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		/* SSBS set means "speculation allowed", hence the inversion */
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
				       NULL);

	WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
}
353 | ||
/*
 * Capability matcher for ARM64_SSBD. Determines, for the local CPU,
 * whether the Speculative Store Bypass mitigation is required, applies
 * the command-line policy (ssbd_state), and keeps the global safety
 * trackers (__ssb_safe, ssbd_state) up to date.
 *
 * Returns true when the dynamic mitigation machinery is needed on this
 * CPU. Runs per-CPU with preemption disabled (SCOPE_LOCAL_CPU).
 */
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				    int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;
	int conduit;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		/* CPU controls SSB itself via PSTATE.SSBS; no callback needed */
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				       ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	if (conduit == SMCCC_CONDUIT_NONE) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		/* Firmware mitigation available: this CPU is vulnerable */
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	/* Apply the global policy to this CPU */
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}
8e290624 | 456 | |
/* known invulnerable cores (not affected by Speculative Store Bypass) */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
	{},
};
467 | ||
#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Non-zero while this CPU is inside the erratum-1463225 workaround window */
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

/*
 * Erratum 1463225 matcher: affected MIDR AND the kernel running at EL2
 * (VHE) — the workaround is only applied in that configuration here.
 */
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
478 | ||
/* Trap EL0 cache maintenance ops by clearing SCTLR_EL1.UCI on this CPU */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
484 | ||
/*
 * Initialiser fragments for arm64_cpu_capabilities entries. The CAP_*
 * macros fill in .matches plus the MIDR description; the ERRATA_* macros
 * additionally set .type to ARM64_CPUCAP_LOCAL_CPU_ERRATUM.
 */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* List of (MIDR, REVIDR-mask) pairs naming revisions with the fix applied */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given a model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_RANGE_LIST(midr_list)
521 | ||
/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * Report the machine-wide Spectre-v2 status accumulated while booting
 * CPUs: NOT_REQUIRED (all cores safe), WA_NEEDED (hardening installed),
 * or UNKNOWN (some core needed hardening but it could not be enabled).
 */
int get_spectre_v2_workaround_state(void)
{
	if (__spectrev2_safe)
		return ARM64_BP_HARDEN_NOT_REQUIRED;

	if (!__hardenbp_enab)
		return ARM64_BP_HARDEN_UNKNOWN;

	return ARM64_BP_HARDEN_WA_NEEDED;
}
536 | ||
/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
	{ /* sentinel */ }
};
550 | ||
/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 *
 * Capability matcher for ARM64_HARDEN_BRANCH_PREDICTOR: decides, per
 * CPU, whether Spectre-v2 hardening is needed (CSV2 feature bit, then
 * the safe list, then firmware) and updates the global trackers.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	/* From here on this core is known vulnerable */
	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}
06f1494f | 598 | |
/* ThunderX2 family: Broadcom Vulcan and its Cavium rebrand */
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

/*
 * The TX2 TVM workaround is only needed on an affected part, with EL2
 * available, and when SMT is in use (some CPU has a non-zero affinity
 * level 0, i.e. there are multiple threads per core).
 */
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}
622 | ||
/*
 * Neoverse-N1 erratum 1542419 matcher: any N1 revision that also
 * advertises CTR_EL0.DIC (no I-cache invalidation needed for coherence).
 */
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}
8892b718 | 634 | |
#ifdef CONFIG_RANDOMIZE_BASE

/* Cortex-A57/A72: parts covered by the EL2 vector hardening entry */
static const struct midr_range ca57_a72[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif
644 | ||
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/* Parts needing repeated TLB invalidation: Falkor e1009, ARM e1286807 */
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
#endif
	{},
};
#endif
664 | ||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
/* Non-static: presumably referenced outside this file — verify at callers */
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif
674 | ||
#ifdef CONFIG_CAVIUM_ERRATUM_30115
/* ThunderX parts affected by Cavium erratum 30115 */
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif
686 | ||
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
/* Falkor v1 r0p0 plus Kryo parts matched via the looser is_kryo_midr() */
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif
699 | ||
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
/* Cortex-A53 revisions covered by the clean-cache workaround */
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif
715 | ||
#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
731 | ||
#ifdef CONFIG_ARM64_ERRATUM_845719
/* Parts affected by ARM erratum 845719 */
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	{},
};
#endif
741 | ||
#ifdef CONFIG_ARM64_ERRATUM_843419
/*
 * Erratum 843419 entries. The A53 entry's MIDR_FIXED marks r0p4 parts
 * with REVIDR bit 8 set as already fixed.
 */
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif
758 | ||
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
/* Parts needing the speculative-AT workaround (errata 1165522/1319367/1530923) */
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif
778 | ||
#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Parts affected by erratum 1463225 */
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
788 | ||
c9460dcb SP |
789 | const struct arm64_cpu_capabilities arm64_errata[] = { |
790 | #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE | |
c0a01b84 | 791 | { |
357dd8a2 | 792 | .desc = "ARM errata 826319, 827319, 824069, or 819472", |
c0a01b84 | 793 | .capability = ARM64_WORKAROUND_CLEAN_CACHE, |
c9460dcb | 794 | ERRATA_MIDR_RANGE_LIST(workaround_clean_cache), |
c0cda3b8 | 795 | .cpu_enable = cpu_enable_cache_maint_trap, |
c0a01b84 AP |
796 | }, |
797 | #endif | |
798 | #ifdef CONFIG_ARM64_ERRATUM_832075 | |
301bcfac | 799 | { |
5afaa1fc AP |
800 | /* Cortex-A57 r0p0 - r1p2 */ |
801 | .desc = "ARM erratum 832075", | |
802 | .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, | |
5e7951ce SP |
803 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, |
804 | 0, 0, | |
805 | 1, 2), | |
5afaa1fc | 806 | }, |
905e8c5d | 807 | #endif |
498cd5c3 MZ |
808 | #ifdef CONFIG_ARM64_ERRATUM_834220 |
809 | { | |
810 | /* Cortex-A57 r0p0 - r1p2 */ | |
811 | .desc = "ARM erratum 834220", | |
812 | .capability = ARM64_WORKAROUND_834220, | |
5e7951ce SP |
813 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, |
814 | 0, 0, | |
815 | 1, 2), | |
498cd5c3 MZ |
816 | }, |
817 | #endif | |
ca79acca AB |
818 | #ifdef CONFIG_ARM64_ERRATUM_843419 |
819 | { | |
ca79acca AB |
820 | .desc = "ARM erratum 843419", |
821 | .capability = ARM64_WORKAROUND_843419, | |
1cf45b8f FF |
822 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
823 | .matches = cpucap_multi_entry_cap_matches, | |
824 | .match_list = erratum_843419_list, | |
498cd5c3 MZ |
825 | }, |
826 | #endif | |
905e8c5d WD |
827 | #ifdef CONFIG_ARM64_ERRATUM_845719 |
828 | { | |
905e8c5d WD |
829 | .desc = "ARM erratum 845719", |
830 | .capability = ARM64_WORKAROUND_845719, | |
bfc97f9f | 831 | ERRATA_MIDR_RANGE_LIST(erratum_845719_list), |
905e8c5d | 832 | }, |
6d4e11c5 RR |
833 | #endif |
834 | #ifdef CONFIG_CAVIUM_ERRATUM_23154 | |
835 | { | |
836 | /* Cavium ThunderX, pass 1.x */ | |
837 | .desc = "Cavium erratum 23154", | |
838 | .capability = ARM64_WORKAROUND_CAVIUM_23154, | |
5e7951ce | 839 | ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1), |
6d4e11c5 | 840 | }, |
104a0c02 AP |
841 | #endif |
842 | #ifdef CONFIG_CAVIUM_ERRATUM_27456 | |
843 | { | |
47c459be GK |
844 | .desc = "Cavium erratum 27456", |
845 | .capability = ARM64_WORKAROUND_CAVIUM_27456, | |
f58cdf7e | 846 | ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus), |
47c459be | 847 | }, |
690a3415 DD |
848 | #endif |
849 | #ifdef CONFIG_CAVIUM_ERRATUM_30115 | |
850 | { | |
690a3415 DD |
851 | .desc = "Cavium erratum 30115", |
852 | .capability = ARM64_WORKAROUND_CAVIUM_30115, | |
f58cdf7e | 853 | ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus), |
690a3415 | 854 | }, |
c0a01b84 | 855 | #endif |
116c81f4 | 856 | { |
880f7cc4 | 857 | .desc = "Mismatched cache type (CTR_EL0)", |
314d53d2 SP |
858 | .capability = ARM64_MISMATCHED_CACHE_TYPE, |
859 | .matches = has_mismatched_cache_type, | |
5b4747c5 | 860 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
c0cda3b8 | 861 | .cpu_enable = cpu_enable_trap_ctr_access, |
116c81f4 | 862 | }, |
38fd94b0 CC |
863 | #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 |
864 | { | |
a3dcea2c | 865 | .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003", |
bb487118 | 866 | .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, |
d4af3c4b | 867 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
1e013d06 | 868 | .matches = cpucap_multi_entry_cap_matches, |
a3dcea2c | 869 | .match_list = qcom_erratum_1003_list, |
bb487118 | 870 | }, |
38fd94b0 | 871 | #endif |
ce8c80c5 | 872 | #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI |
d9ff80f8 | 873 | { |
357dd8a2 | 874 | .desc = "Qualcomm erratum 1009, or ARM erratum 1286807", |
d9ff80f8 | 875 | .capability = ARM64_WORKAROUND_REPEAT_TLBI, |
36c602dc BA |
876 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
877 | .matches = cpucap_multi_entry_cap_matches, | |
878 | .match_list = arm64_repeat_tlbi_list, | |
d9ff80f8 | 879 | }, |
eeb1efbc MZ |
880 | #endif |
881 | #ifdef CONFIG_ARM64_ERRATUM_858921 | |
882 | { | |
883 | /* Cortex-A73 all versions */ | |
884 | .desc = "ARM erratum 858921", | |
885 | .capability = ARM64_WORKAROUND_858921, | |
5e7951ce | 886 | ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
eeb1efbc | 887 | }, |
aa6acde6 | 888 | #endif |
aa6acde6 WD |
889 | { |
890 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | |
73f38166 MZ |
891 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
892 | .matches = check_branch_predictor, | |
f3d795d9 | 893 | }, |
a59a2edb | 894 | #ifdef CONFIG_RANDOMIZE_BASE |
4b472ffd | 895 | { |
8892b718 | 896 | .desc = "EL2 vector hardening", |
4b472ffd | 897 | .capability = ARM64_HARDEN_EL2_VECTORS, |
f75e2294 | 898 | ERRATA_MIDR_RANGE_LIST(ca57_a72), |
4b472ffd | 899 | }, |
a725e3dd | 900 | #endif |
a725e3dd MZ |
901 | { |
902 | .desc = "Speculative Store Bypass Disable", | |
903 | .capability = ARM64_SSBD, | |
904 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
905 | .matches = has_ssbd_mitigation, | |
526e065d | 906 | .midr_range_list = arm64_ssb_cpus, |
a725e3dd | 907 | }, |
a5325089 | 908 | #ifdef CONFIG_ARM64_ERRATUM_1418040 |
95b861a4 | 909 | { |
a5325089 MZ |
910 | .desc = "ARM erratum 1418040", |
911 | .capability = ARM64_WORKAROUND_1418040, | |
912 | ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), | |
ed888cb0 MZ |
913 | /* |
914 | * We need to allow affected CPUs to come in late, but | |
915 | * also need the non-affected CPUs to be able to come | |
916 | * in at any point in time. Wonderful. | |
917 | */ | |
918 | .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, | |
95b861a4 | 919 | }, |
8b2cca9a | 920 | #endif |
02ab1f50 | 921 | #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT |
8b2cca9a | 922 | { |
c350717e | 923 | .desc = "ARM errata 1165522, 1319367, or 1530923", |
02ab1f50 AS |
924 | .capability = ARM64_WORKAROUND_SPECULATIVE_AT, |
925 | ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list), | |
8b2cca9a | 926 | }, |
969f5ea6 WD |
927 | #endif |
928 | #ifdef CONFIG_ARM64_ERRATUM_1463225 | |
929 | { | |
930 | .desc = "ARM erratum 1463225", | |
931 | .capability = ARM64_WORKAROUND_1463225, | |
932 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
933 | .matches = has_cortex_a76_erratum_1463225, | |
a9e821b8 | 934 | .midr_range_list = erratum_1463225, |
969f5ea6 | 935 | }, |
93916beb MZ |
936 | #endif |
937 | #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219 | |
938 | { | |
939 | .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)", | |
940 | .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM, | |
941 | ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), | |
942 | .matches = needs_tx2_tvm_workaround, | |
943 | }, | |
9405447e MZ |
944 | { |
945 | .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)", | |
946 | .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM, | |
947 | ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), | |
948 | }, | |
6a036afb | 949 | #endif |
05460849 JM |
950 | #ifdef CONFIG_ARM64_ERRATUM_1542419 |
951 | { | |
952 | /* we depend on the firmware portion for correctness */ | |
953 | .desc = "ARM erratum 1542419 (kernel portion)", | |
954 | .capability = ARM64_WORKAROUND_1542419, | |
955 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
956 | .matches = has_neoverse_n1_erratum_1542419, | |
957 | .cpu_enable = cpu_enable_trap_ctr_access, | |
958 | }, | |
d9ff80f8 | 959 | #endif |
5afaa1fc | 960 | { |
301bcfac | 961 | } |
e116a375 | 962 | }; |
3891ebcc MYK |
963 | |
/*
 * sysfs show handler for
 * /sys/devices/system/cpu/vulnerabilities/spectre_v1.
 *
 * arm64 unconditionally mitigates Spectre v1 through __user pointer
 * sanitization, so the reported state never varies per CPU.
 *
 * Returns the number of characters written to @buf.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	const char *status = "Mitigation: __user pointer sanitization\n";

	return sprintf(buf, "%s", status);
}
d2532e27 JL |
969 | |
970 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, | |
971 | char *buf) | |
972 | { | |
c118bbb5 AP |
973 | switch (get_spectre_v2_workaround_state()) { |
974 | case ARM64_BP_HARDEN_NOT_REQUIRED: | |
d2532e27 | 975 | return sprintf(buf, "Not affected\n"); |
c118bbb5 | 976 | case ARM64_BP_HARDEN_WA_NEEDED: |
d2532e27 | 977 | return sprintf(buf, "Mitigation: Branch predictor hardening\n"); |
c118bbb5 AP |
978 | case ARM64_BP_HARDEN_UNKNOWN: |
979 | default: | |
980 | return sprintf(buf, "Vulnerable\n"); | |
981 | } | |
d2532e27 | 982 | } |
526e065d JL |
983 | |
984 | ssize_t cpu_show_spec_store_bypass(struct device *dev, | |
985 | struct device_attribute *attr, char *buf) | |
986 | { | |
987 | if (__ssb_safe) | |
988 | return sprintf(buf, "Not affected\n"); | |
989 | ||
990 | switch (ssbd_state) { | |
991 | case ARM64_SSBD_KERNEL: | |
992 | case ARM64_SSBD_FORCE_ENABLE: | |
993 | if (IS_ENABLED(CONFIG_ARM64_SSBD)) | |
994 | return sprintf(buf, | |
995 | "Mitigation: Speculative Store Bypass disabled via prctl\n"); | |
996 | } | |
997 | ||
998 | return sprintf(buf, "Vulnerable\n"); | |
999 | } |