Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
e116a375 AP |
2 | /* |
3 | * Contains CPU specific errata definitions | |
4 | * | |
5 | * Copyright (C) 2014 ARM Ltd. | |
e116a375 AP |
6 | */ |
7 | ||
94a5d879 | 8 | #include <linux/arm-smccc.h> |
e116a375 | 9 | #include <linux/types.h> |
a111b7c0 | 10 | #include <linux/cpu.h> |
e116a375 AP |
11 | #include <asm/cpu.h> |
12 | #include <asm/cputype.h> | |
13 | #include <asm/cpufeature.h> | |
4db61fef | 14 | #include <asm/kvm_asm.h> |
93916beb | 15 | #include <asm/smp_plat.h> |
e116a375 | 16 | |
301bcfac | 17 | static bool __maybe_unused |
92406f0c | 18 | is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) |
301bcfac | 19 | { |
e8002e02 AB |
20 | const struct arm64_midr_revidr *fix; |
21 | u32 midr = read_cpuid_id(), revidr; | |
22 | ||
92406f0c | 23 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
1df31050 | 24 | if (!is_midr_in_range(midr, &entry->midr_range)) |
e8002e02 AB |
25 | return false; |
26 | ||
27 | midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK; | |
28 | revidr = read_cpuid(REVIDR_EL1); | |
29 | for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++) | |
30 | if (midr == fix->midr_rv && (revidr & fix->revidr_mask)) | |
31 | return false; | |
32 | ||
33 | return true; | |
301bcfac AP |
34 | } |
35 | ||
be5b2998 SP |
36 | static bool __maybe_unused |
37 | is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry, | |
38 | int scope) | |
301bcfac | 39 | { |
92406f0c | 40 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
be5b2998 | 41 | return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list); |
301bcfac AP |
42 | } |
43 | ||
bb487118 SB |
44 | static bool __maybe_unused |
45 | is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) | |
46 | { | |
47 | u32 model; | |
48 | ||
49 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | |
50 | ||
51 | model = read_cpuid_id(); | |
52 | model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | | |
53 | MIDR_ARCHITECTURE_MASK; | |
54 | ||
1df31050 | 55 | return model == entry->midr_range.model; |
bb487118 SB |
56 | } |
57 | ||
116c81f4 | 58 | static bool |
314d53d2 SP |
59 | has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry, |
60 | int scope) | |
116c81f4 | 61 | { |
1602df02 SP |
62 | u64 mask = arm64_ftr_reg_ctrel0.strict_mask; |
63 | u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask; | |
64 | u64 ctr_raw, ctr_real; | |
314d53d2 | 65 | |
116c81f4 | 66 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
1602df02 SP |
67 | |
68 | /* | |
69 | * We want to make sure that all the CPUs in the system expose | |
70 | * a consistent CTR_EL0 to make sure that applications behaves | |
71 | * correctly with migration. | |
72 | * | |
73 | * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 : | |
74 | * | |
75 | * 1) It is safe if the system doesn't support IDC, as CPU anyway | |
76 | * reports IDC = 0, consistent with the rest. | |
77 | * | |
78 | * 2) If the system has IDC, it is still safe as we trap CTR_EL0 | |
79 | * access on this CPU via the ARM64_HAS_CACHE_IDC capability. | |
80 | * | |
81 | * So, we need to make sure either the raw CTR_EL0 or the effective | |
82 | * CTR_EL0 matches the system's copy to allow a secondary CPU to boot. | |
83 | */ | |
84 | ctr_raw = read_cpuid_cachetype() & mask; | |
85 | ctr_real = read_cpuid_effective_cachetype() & mask; | |
86 | ||
87 | return (ctr_real != sys) && (ctr_raw != sys); | |
116c81f4 SP |
88 | } |
89 | ||
c0cda3b8 | 90 | static void |
05460849 | 91 | cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) |
116c81f4 | 92 | { |
4afe8e79 | 93 | u64 mask = arm64_ftr_reg_ctrel0.strict_mask; |
05460849 | 94 | bool enable_uct_trap = false; |
4afe8e79 SP |
95 | |
96 | /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */ | |
97 | if ((read_cpuid_cachetype() & mask) != | |
98 | (arm64_ftr_reg_ctrel0.sys_val & mask)) | |
05460849 JM |
99 | enable_uct_trap = true; |
100 | ||
101 | /* ... or if the system is affected by an erratum */ | |
102 | if (cap->capability == ARM64_WORKAROUND_1542419) | |
103 | enable_uct_trap = true; | |
104 | ||
105 | if (enable_uct_trap) | |
4afe8e79 | 106 | sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); |
116c81f4 SP |
107 | } |
108 | ||
4205a89b MZ |
109 | atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); |
110 | ||
0f15adbb WD |
111 | #include <asm/mmu_context.h> |
112 | #include <asm/cacheflush.h> | |
113 | ||
114 | DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); | |
115 | ||
5359a87d | 116 | #ifdef CONFIG_RANDOMIZE_BASE |
0f15adbb WD |
117 | static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, |
118 | const char *hyp_vecs_end) | |
119 | { | |
6e52aab9 | 120 | void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K); |
0f15adbb WD |
121 | int i; |
122 | ||
123 | for (i = 0; i < SZ_2K; i += 0x80) | |
124 | memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start); | |
125 | ||
3b8c9f1c | 126 | __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); |
0f15adbb WD |
127 | } |
128 | ||
b181048f | 129 | static void install_bp_hardening_cb(bp_hardening_cb_t fn) |
0f15adbb | 130 | { |
d8797b12 | 131 | static DEFINE_RAW_SPINLOCK(bp_lock); |
0f15adbb | 132 | int cpu, slot = -1; |
b181048f WD |
133 | const char *hyp_vecs_start = __smccc_workaround_1_smc; |
134 | const char *hyp_vecs_end = __smccc_workaround_1_smc + | |
135 | __SMCCC_WORKAROUND_1_SMC_SZ; | |
0f15adbb | 136 | |
4debef55 | 137 | /* |
7a292b6c TR |
138 | * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if |
139 | * we're a guest. Skip the hyp-vectors work. | |
4debef55 | 140 | */ |
b181048f | 141 | if (!is_hyp_mode_available()) { |
4debef55 JM |
142 | __this_cpu_write(bp_hardening_data.fn, fn); |
143 | return; | |
144 | } | |
145 | ||
d8797b12 | 146 | raw_spin_lock(&bp_lock); |
0f15adbb WD |
147 | for_each_possible_cpu(cpu) { |
148 | if (per_cpu(bp_hardening_data.fn, cpu) == fn) { | |
149 | slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); | |
150 | break; | |
151 | } | |
152 | } | |
153 | ||
154 | if (slot == -1) { | |
4205a89b MZ |
155 | slot = atomic_inc_return(&arm64_el2_vector_last_slot); |
156 | BUG_ON(slot >= BP_HARDEN_EL2_SLOTS); | |
0f15adbb WD |
157 | __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); |
158 | } | |
159 | ||
160 | __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); | |
161 | __this_cpu_write(bp_hardening_data.fn, fn); | |
d8797b12 | 162 | raw_spin_unlock(&bp_lock); |
0f15adbb WD |
163 | } |
164 | #else | |
b181048f | 165 | static void install_bp_hardening_cb(bp_hardening_cb_t fn) |
0f15adbb WD |
166 | { |
167 | __this_cpu_write(bp_hardening_data.fn, fn); | |
168 | } | |
5359a87d | 169 | #endif /* CONFIG_RANDOMIZE_BASE */ |
0f15adbb | 170 | |
b092201e | 171 | #include <linux/arm-smccc.h> |
aa6acde6 | 172 | |
9a25136a | 173 | static void __maybe_unused call_smc_arch_workaround_1(void) |
b092201e MZ |
174 | { |
175 | arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); | |
176 | } | |
177 | ||
178 | static void call_hvc_arch_workaround_1(void) | |
179 | { | |
180 | arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); | |
181 | } | |
182 | ||
4bc352ff SD |
183 | static void qcom_link_stack_sanitization(void) |
184 | { | |
185 | u64 tmp; | |
186 | ||
187 | asm volatile("mov %0, x30 \n" | |
188 | ".rept 16 \n" | |
189 | "bl . + 4 \n" | |
190 | ".endr \n" | |
191 | "mov x30, %0 \n" | |
192 | : "=&r" (tmp)); | |
193 | } | |
194 | ||
e5ce5e72 JL |
195 | static bool __nospectre_v2; |
196 | static int __init parse_nospectre_v2(char *str) | |
197 | { | |
198 | __nospectre_v2 = true; | |
199 | return 0; | |
200 | } | |
201 | early_param("nospectre_v2", parse_nospectre_v2); | |
202 | ||
73f38166 MZ |
203 | /* |
204 | * -1: No workaround | |
205 | * 0: No workaround required | |
206 | * 1: Workaround installed | |
207 | */ | |
208 | static int detect_harden_bp_fw(void) | |
b092201e MZ |
209 | { |
210 | bp_hardening_cb_t cb; | |
b092201e | 211 | struct arm_smccc_res res; |
4bc352ff | 212 | u32 midr = read_cpuid_id(); |
b092201e | 213 | |
ce4d5ca2 SP |
214 | arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
215 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | |
216 | ||
217 | switch ((int)res.a0) { | |
218 | case 1: | |
219 | /* Firmware says we're just fine */ | |
220 | return 0; | |
221 | case 0: | |
222 | break; | |
223 | default: | |
73f38166 | 224 | return -1; |
ce4d5ca2 | 225 | } |
b092201e | 226 | |
c98bd299 MR |
227 | switch (arm_smccc_1_1_get_conduit()) { |
228 | case SMCCC_CONDUIT_HVC: | |
ce4d5ca2 | 229 | cb = call_hvc_arch_workaround_1; |
b092201e MZ |
230 | break; |
231 | ||
c98bd299 | 232 | case SMCCC_CONDUIT_SMC: |
ce4d5ca2 | 233 | cb = call_smc_arch_workaround_1; |
b11483ef | 234 | break; |
b092201e MZ |
235 | |
236 | default: | |
73f38166 | 237 | return -1; |
b092201e MZ |
238 | } |
239 | ||
4bc352ff SD |
240 | if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || |
241 | ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) | |
242 | cb = qcom_link_stack_sanitization; | |
243 | ||
b181048f | 244 | install_bp_hardening_cb(cb); |
73f38166 | 245 | return 1; |
aa6acde6 | 246 | } |
0f15adbb | 247 | |
5cf9ce6e MZ |
248 | DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required); |
249 | ||
a43ae4df | 250 | int ssbd_state __read_mostly = ARM64_SSBD_KERNEL; |
526e065d | 251 | static bool __ssb_safe = true; |
a43ae4df MZ |
252 | |
253 | static const struct ssbd_options { | |
254 | const char *str; | |
255 | int state; | |
256 | } ssbd_options[] = { | |
257 | { "force-on", ARM64_SSBD_FORCE_ENABLE, }, | |
258 | { "force-off", ARM64_SSBD_FORCE_DISABLE, }, | |
259 | { "kernel", ARM64_SSBD_KERNEL, }, | |
260 | }; | |
261 | ||
262 | static int __init ssbd_cfg(char *buf) | |
263 | { | |
264 | int i; | |
265 | ||
266 | if (!buf || !buf[0]) | |
267 | return -EINVAL; | |
268 | ||
269 | for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { | |
270 | int len = strlen(ssbd_options[i].str); | |
271 | ||
272 | if (strncmp(buf, ssbd_options[i].str, len)) | |
273 | continue; | |
274 | ||
275 | ssbd_state = ssbd_options[i].state; | |
276 | return 0; | |
277 | } | |
278 | ||
279 | return -EINVAL; | |
280 | } | |
281 | early_param("ssbd", ssbd_cfg); | |
282 | ||
8e290624 MZ |
283 | void __init arm64_update_smccc_conduit(struct alt_instr *alt, |
284 | __le32 *origptr, __le32 *updptr, | |
285 | int nr_inst) | |
286 | { | |
287 | u32 insn; | |
288 | ||
289 | BUG_ON(nr_inst != 1); | |
290 | ||
c98bd299 MR |
291 | switch (arm_smccc_1_1_get_conduit()) { |
292 | case SMCCC_CONDUIT_HVC: | |
8e290624 MZ |
293 | insn = aarch64_insn_get_hvc_value(); |
294 | break; | |
c98bd299 | 295 | case SMCCC_CONDUIT_SMC: |
8e290624 MZ |
296 | insn = aarch64_insn_get_smc_value(); |
297 | break; | |
298 | default: | |
299 | return; | |
300 | } | |
301 | ||
302 | *updptr = cpu_to_le32(insn); | |
303 | } | |
a725e3dd | 304 | |
986372c4 MZ |
305 | void __init arm64_enable_wa2_handling(struct alt_instr *alt, |
306 | __le32 *origptr, __le32 *updptr, | |
307 | int nr_inst) | |
308 | { | |
309 | BUG_ON(nr_inst != 1); | |
310 | /* | |
311 | * Only allow mitigation on EL1 entry/exit and guest | |
312 | * ARCH_WORKAROUND_2 handling if the SSBD state allows it to | |
313 | * be flipped. | |
314 | */ | |
315 | if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL) | |
316 | *updptr = cpu_to_le32(aarch64_insn_gen_nop()); | |
317 | } | |
318 | ||
647d0519 | 319 | void arm64_set_ssbd_mitigation(bool state) |
a725e3dd | 320 | { |
ce4d5ca2 SP |
321 | int conduit; |
322 | ||
8f04e8e6 WD |
323 | if (this_cpu_has_cap(ARM64_SSBS)) { |
324 | if (state) | |
325 | asm volatile(SET_PSTATE_SSBS(0)); | |
326 | else | |
327 | asm volatile(SET_PSTATE_SSBS(1)); | |
328 | return; | |
329 | } | |
330 | ||
ce4d5ca2 SP |
331 | conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state, |
332 | NULL); | |
a725e3dd | 333 | |
ce4d5ca2 | 334 | WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE); |
a725e3dd MZ |
335 | } |
336 | ||
337 | static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, | |
338 | int scope) | |
339 | { | |
340 | struct arm_smccc_res res; | |
a43ae4df MZ |
341 | bool required = true; |
342 | s32 val; | |
526e065d | 343 | bool this_cpu_safe = false; |
ce4d5ca2 | 344 | int conduit; |
a725e3dd MZ |
345 | |
346 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | |
347 | ||
a111b7c0 JP |
348 | if (cpu_mitigations_off()) |
349 | ssbd_state = ARM64_SSBD_FORCE_DISABLE; | |
350 | ||
eb337cdf WD |
351 | /* delay setting __ssb_safe until we get a firmware response */ |
352 | if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) | |
353 | this_cpu_safe = true; | |
354 | ||
8f04e8e6 | 355 | if (this_cpu_has_cap(ARM64_SSBS)) { |
eb337cdf WD |
356 | if (!this_cpu_safe) |
357 | __ssb_safe = false; | |
8f04e8e6 WD |
358 | required = false; |
359 | goto out_printmsg; | |
360 | } | |
361 | ||
ce4d5ca2 SP |
362 | conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
363 | ARM_SMCCC_ARCH_WORKAROUND_2, &res); | |
a725e3dd | 364 | |
ce4d5ca2 | 365 | if (conduit == SMCCC_CONDUIT_NONE) { |
a43ae4df | 366 | ssbd_state = ARM64_SSBD_UNKNOWN; |
526e065d JL |
367 | if (!this_cpu_safe) |
368 | __ssb_safe = false; | |
a43ae4df | 369 | return false; |
a725e3dd MZ |
370 | } |
371 | ||
a43ae4df MZ |
372 | val = (s32)res.a0; |
373 | ||
374 | switch (val) { | |
375 | case SMCCC_RET_NOT_SUPPORTED: | |
376 | ssbd_state = ARM64_SSBD_UNKNOWN; | |
526e065d JL |
377 | if (!this_cpu_safe) |
378 | __ssb_safe = false; | |
a43ae4df MZ |
379 | return false; |
380 | ||
526e065d | 381 | /* machines with mixed mitigation requirements must not return this */ |
a43ae4df MZ |
382 | case SMCCC_RET_NOT_REQUIRED: |
383 | pr_info_once("%s mitigation not required\n", entry->desc); | |
384 | ssbd_state = ARM64_SSBD_MITIGATED; | |
385 | return false; | |
386 | ||
387 | case SMCCC_RET_SUCCESS: | |
526e065d | 388 | __ssb_safe = false; |
a43ae4df MZ |
389 | required = true; |
390 | break; | |
391 | ||
392 | case 1: /* Mitigation not required on this CPU */ | |
393 | required = false; | |
394 | break; | |
395 | ||
396 | default: | |
397 | WARN_ON(1); | |
526e065d JL |
398 | if (!this_cpu_safe) |
399 | __ssb_safe = false; | |
a43ae4df MZ |
400 | return false; |
401 | } | |
402 | ||
403 | switch (ssbd_state) { | |
404 | case ARM64_SSBD_FORCE_DISABLE: | |
a43ae4df MZ |
405 | arm64_set_ssbd_mitigation(false); |
406 | required = false; | |
407 | break; | |
408 | ||
409 | case ARM64_SSBD_KERNEL: | |
410 | if (required) { | |
411 | __this_cpu_write(arm64_ssbd_callback_required, 1); | |
412 | arm64_set_ssbd_mitigation(true); | |
413 | } | |
414 | break; | |
415 | ||
416 | case ARM64_SSBD_FORCE_ENABLE: | |
a725e3dd | 417 | arm64_set_ssbd_mitigation(true); |
a43ae4df MZ |
418 | required = true; |
419 | break; | |
420 | ||
421 | default: | |
422 | WARN_ON(1); | |
423 | break; | |
a725e3dd MZ |
424 | } |
425 | ||
8f04e8e6 WD |
426 | out_printmsg: |
427 | switch (ssbd_state) { | |
428 | case ARM64_SSBD_FORCE_DISABLE: | |
429 | pr_info_once("%s disabled from command-line\n", entry->desc); | |
430 | break; | |
431 | ||
432 | case ARM64_SSBD_FORCE_ENABLE: | |
433 | pr_info_once("%s forced from command-line\n", entry->desc); | |
434 | break; | |
435 | } | |
436 | ||
a43ae4df | 437 | return required; |
a725e3dd | 438 | } |
8e290624 | 439 | |
39533e12 MZ |
440 | static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap) |
441 | { | |
442 | if (ssbd_state != ARM64_SSBD_FORCE_DISABLE) | |
443 | cap->matches(cap, SCOPE_LOCAL_CPU); | |
444 | } | |
445 | ||
526e065d JL |
446 | /* known invulnerable cores */ |
447 | static const struct midr_range arm64_ssb_cpus[] = { | |
448 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), | |
449 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), | |
450 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), | |
e059770c | 451 | MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), |
108447fd SPR |
452 | MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), |
453 | MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), | |
526e065d JL |
454 | {}, |
455 | }; | |
456 | ||
#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Set while the erratum 1463225 single-step workaround is in progress */
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

/* Affected MIDR list, and only when the kernel runs in hyp (VHE) mode */
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
467 | ||
b8925ee2 WD |
468 | static void __maybe_unused |
469 | cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) | |
470 | { | |
471 | sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0); | |
472 | } | |
473 | ||
/* Capability matching a variant/revision window of one CPU model */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

/* Capability matching every variant/revision of one CPU model */
#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* Single (midr, revidr-mask) pair marking a fixed revision */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given a model */
#define ERRATA_MIDR_ALL_VERSIONS(model)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_RANGE_LIST(midr_list)
510 | ||
d2532e27 JL |
511 | /* Track overall mitigation state. We are only mitigated if all cores are ok */ |
512 | static bool __hardenbp_enab = true; | |
513 | static bool __spectrev2_safe = true; | |
514 | ||
c118bbb5 AP |
515 | int get_spectre_v2_workaround_state(void) |
516 | { | |
517 | if (__spectrev2_safe) | |
518 | return ARM64_BP_HARDEN_NOT_REQUIRED; | |
519 | ||
520 | if (!__hardenbp_enab) | |
521 | return ARM64_BP_HARDEN_UNKNOWN; | |
522 | ||
523 | return ARM64_BP_HARDEN_WA_NEEDED; | |
524 | } | |
525 | ||
be5b2998 | 526 | /* |
73f38166 | 527 | * List of CPUs that do not need any Spectre-v2 mitigation at all. |
be5b2998 | 528 | */ |
73f38166 MZ |
529 | static const struct midr_range spectre_v2_safe_list[] = { |
530 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), | |
531 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), | |
532 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), | |
e059770c | 533 | MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), |
aa638cfe | 534 | MIDR_ALL_VERSIONS(MIDR_HISI_TSV110), |
83b0c36b SPR |
535 | MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), |
536 | MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), | |
73f38166 | 537 | { /* sentinel */ } |
be5b2998 SP |
538 | }; |
539 | ||
d2532e27 JL |
540 | /* |
541 | * Track overall bp hardening for all heterogeneous cores in the machine. | |
542 | * We are only considered "safe" if all booted cores are known safe. | |
543 | */ | |
73f38166 MZ |
544 | static bool __maybe_unused |
545 | check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope) | |
546 | { | |
547 | int need_wa; | |
548 | ||
549 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | |
550 | ||
551 | /* If the CPU has CSV2 set, we're safe */ | |
552 | if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1), | |
553 | ID_AA64PFR0_CSV2_SHIFT)) | |
554 | return false; | |
555 | ||
556 | /* Alternatively, we have a list of unaffected CPUs */ | |
557 | if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list)) | |
558 | return false; | |
559 | ||
560 | /* Fallback to firmware detection */ | |
561 | need_wa = detect_harden_bp_fw(); | |
562 | if (!need_wa) | |
563 | return false; | |
564 | ||
d2532e27 JL |
565 | __spectrev2_safe = false; |
566 | ||
73f38166 | 567 | /* forced off */ |
a111b7c0 | 568 | if (__nospectre_v2 || cpu_mitigations_off()) { |
73f38166 | 569 | pr_info_once("spectrev2 mitigation disabled by command line option\n"); |
d2532e27 | 570 | __hardenbp_enab = false; |
73f38166 MZ |
571 | return false; |
572 | } | |
573 | ||
d2532e27 | 574 | if (need_wa < 0) { |
73f38166 | 575 | pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n"); |
d2532e27 JL |
576 | __hardenbp_enab = false; |
577 | } | |
73f38166 MZ |
578 | |
579 | return (need_wa > 0); | |
580 | } | |
06f1494f | 581 | |
18fce561 MZ |
582 | static void |
583 | cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap) | |
584 | { | |
585 | cap->matches(cap, SCOPE_LOCAL_CPU); | |
586 | } | |
587 | ||
93916beb MZ |
588 | static const __maybe_unused struct midr_range tx2_family_cpus[] = { |
589 | MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), | |
590 | MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), | |
591 | {}, | |
592 | }; | |
593 | ||
594 | static bool __maybe_unused | |
595 | needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry, | |
596 | int scope) | |
597 | { | |
598 | int i; | |
599 | ||
600 | if (!is_affected_midr_range_list(entry, scope) || | |
601 | !is_hyp_mode_available()) | |
602 | return false; | |
603 | ||
604 | for_each_possible_cpu(i) { | |
605 | if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0) | |
606 | return true; | |
607 | } | |
608 | ||
609 | return false; | |
610 | } | |
611 | ||
05460849 JM |
612 | static bool __maybe_unused |
613 | has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry, | |
614 | int scope) | |
615 | { | |
616 | u32 midr = read_cpuid_id(); | |
617 | bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT); | |
618 | const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1); | |
619 | ||
620 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | |
621 | return is_midr_in_range(midr, &range) && has_dic; | |
622 | } | |
8892b718 | 623 | |
#ifdef CONFIG_RANDOMIZE_BASE

/* Cortex-A57/A72: all versions need EL2 vector hardening */
static const struct midr_range ca57_a72[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif
633 | ||
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/* CPUs whose TLBI must be repeated: Falkor e1009 and Cortex-A76 e1286807 */
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
#endif
	{},
};
#endif
653 | ||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif
675 | ||
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
/* Falkor v1 r0p0 plus all Kryo parts matched by implementor/family */
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif
688 | ||
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif
704 | ||
#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
720 | ||
#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	{},
};
#endif
730 | ||
#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* r0p4 with REVIDR bit 8 set has the fix applied */
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif
747 | ||
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif
767 | ||
#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
777 | ||
c9460dcb SP |
778 | const struct arm64_cpu_capabilities arm64_errata[] = { |
779 | #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE | |
c0a01b84 | 780 | { |
357dd8a2 | 781 | .desc = "ARM errata 826319, 827319, 824069, or 819472", |
c0a01b84 | 782 | .capability = ARM64_WORKAROUND_CLEAN_CACHE, |
c9460dcb | 783 | ERRATA_MIDR_RANGE_LIST(workaround_clean_cache), |
c0cda3b8 | 784 | .cpu_enable = cpu_enable_cache_maint_trap, |
c0a01b84 AP |
785 | }, |
786 | #endif | |
787 | #ifdef CONFIG_ARM64_ERRATUM_832075 | |
301bcfac | 788 | { |
5afaa1fc AP |
789 | /* Cortex-A57 r0p0 - r1p2 */ |
790 | .desc = "ARM erratum 832075", | |
791 | .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, | |
5e7951ce SP |
792 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, |
793 | 0, 0, | |
794 | 1, 2), | |
5afaa1fc | 795 | }, |
905e8c5d | 796 | #endif |
498cd5c3 MZ |
797 | #ifdef CONFIG_ARM64_ERRATUM_834220 |
798 | { | |
799 | /* Cortex-A57 r0p0 - r1p2 */ | |
800 | .desc = "ARM erratum 834220", | |
801 | .capability = ARM64_WORKAROUND_834220, | |
5e7951ce SP |
802 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, |
803 | 0, 0, | |
804 | 1, 2), | |
498cd5c3 MZ |
805 | }, |
806 | #endif | |
ca79acca AB |
807 | #ifdef CONFIG_ARM64_ERRATUM_843419 |
808 | { | |
ca79acca AB |
809 | .desc = "ARM erratum 843419", |
810 | .capability = ARM64_WORKAROUND_843419, | |
1cf45b8f FF |
811 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
812 | .matches = cpucap_multi_entry_cap_matches, | |
813 | .match_list = erratum_843419_list, | |
498cd5c3 MZ |
814 | }, |
815 | #endif | |
905e8c5d WD |
816 | #ifdef CONFIG_ARM64_ERRATUM_845719 |
817 | { | |
905e8c5d WD |
818 | .desc = "ARM erratum 845719", |
819 | .capability = ARM64_WORKAROUND_845719, | |
bfc97f9f | 820 | ERRATA_MIDR_RANGE_LIST(erratum_845719_list), |
905e8c5d | 821 | }, |
6d4e11c5 RR |
822 | #endif |
823 | #ifdef CONFIG_CAVIUM_ERRATUM_23154 | |
824 | { | |
825 | /* Cavium ThunderX, pass 1.x */ | |
826 | .desc = "Cavium erratum 23154", | |
827 | .capability = ARM64_WORKAROUND_CAVIUM_23154, | |
5e7951ce | 828 | ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1), |
6d4e11c5 | 829 | }, |
104a0c02 AP |
830 | #endif |
831 | #ifdef CONFIG_CAVIUM_ERRATUM_27456 | |
832 | { | |
47c459be GK |
833 | .desc = "Cavium erratum 27456", |
834 | .capability = ARM64_WORKAROUND_CAVIUM_27456, | |
f58cdf7e | 835 | ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus), |
47c459be | 836 | }, |
690a3415 DD |
837 | #endif |
838 | #ifdef CONFIG_CAVIUM_ERRATUM_30115 | |
839 | { | |
690a3415 DD |
840 | .desc = "Cavium erratum 30115", |
841 | .capability = ARM64_WORKAROUND_CAVIUM_30115, | |
f58cdf7e | 842 | ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus), |
690a3415 | 843 | }, |
c0a01b84 | 844 | #endif |
116c81f4 | 845 | { |
880f7cc4 | 846 | .desc = "Mismatched cache type (CTR_EL0)", |
314d53d2 SP |
847 | .capability = ARM64_MISMATCHED_CACHE_TYPE, |
848 | .matches = has_mismatched_cache_type, | |
5b4747c5 | 849 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
c0cda3b8 | 850 | .cpu_enable = cpu_enable_trap_ctr_access, |
116c81f4 | 851 | }, |
38fd94b0 CC |
852 | #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 |
853 | { | |
a3dcea2c | 854 | .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003", |
bb487118 | 855 | .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, |
d4af3c4b | 856 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
1e013d06 | 857 | .matches = cpucap_multi_entry_cap_matches, |
a3dcea2c | 858 | .match_list = qcom_erratum_1003_list, |
bb487118 | 859 | }, |
38fd94b0 | 860 | #endif |
ce8c80c5 | 861 | #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI |
d9ff80f8 | 862 | { |
357dd8a2 | 863 | .desc = "Qualcomm erratum 1009, or ARM erratum 1286807", |
d9ff80f8 | 864 | .capability = ARM64_WORKAROUND_REPEAT_TLBI, |
36c602dc BA |
865 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
866 | .matches = cpucap_multi_entry_cap_matches, | |
867 | .match_list = arm64_repeat_tlbi_list, | |
d9ff80f8 | 868 | }, |
eeb1efbc MZ |
869 | #endif |
870 | #ifdef CONFIG_ARM64_ERRATUM_858921 | |
871 | { | |
872 | /* Cortex-A73 all versions */ | |
873 | .desc = "ARM erratum 858921", | |
874 | .capability = ARM64_WORKAROUND_858921, | |
5e7951ce | 875 | ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
eeb1efbc | 876 | }, |
aa6acde6 | 877 | #endif |
aa6acde6 | 878 | { |
18fce561 | 879 | .desc = "Branch predictor hardening", |
688f1e4b | 880 | .capability = ARM64_SPECTRE_V2, |
73f38166 MZ |
881 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
882 | .matches = check_branch_predictor, | |
18fce561 | 883 | .cpu_enable = cpu_enable_branch_predictor_hardening, |
f3d795d9 | 884 | }, |
a59a2edb | 885 | #ifdef CONFIG_RANDOMIZE_BASE |
4b472ffd | 886 | { |
8892b718 | 887 | .desc = "EL2 vector hardening", |
4b472ffd | 888 | .capability = ARM64_HARDEN_EL2_VECTORS, |
f75e2294 | 889 | ERRATA_MIDR_RANGE_LIST(ca57_a72), |
4b472ffd | 890 | }, |
a725e3dd | 891 | #endif |
a725e3dd MZ |
892 | { |
893 | .desc = "Speculative Store Bypass Disable", | |
894 | .capability = ARM64_SSBD, | |
895 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
896 | .matches = has_ssbd_mitigation, | |
39533e12 | 897 | .cpu_enable = cpu_enable_ssbd_mitigation, |
526e065d | 898 | .midr_range_list = arm64_ssb_cpus, |
a725e3dd | 899 | }, |
a5325089 | 900 | #ifdef CONFIG_ARM64_ERRATUM_1418040 |
95b861a4 | 901 | { |
a5325089 MZ |
902 | .desc = "ARM erratum 1418040", |
903 | .capability = ARM64_WORKAROUND_1418040, | |
904 | ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), | |
bf87bb08 MZ |
905 | .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU | |
906 | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU), | |
95b861a4 | 907 | }, |
8b2cca9a | 908 | #endif |
02ab1f50 | 909 | #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT |
8b2cca9a | 910 | { |
c350717e | 911 | .desc = "ARM errata 1165522, 1319367, or 1530923", |
02ab1f50 AS |
912 | .capability = ARM64_WORKAROUND_SPECULATIVE_AT, |
913 | ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list), | |
8b2cca9a | 914 | }, |
969f5ea6 WD |
915 | #endif |
916 | #ifdef CONFIG_ARM64_ERRATUM_1463225 | |
917 | { | |
918 | .desc = "ARM erratum 1463225", | |
919 | .capability = ARM64_WORKAROUND_1463225, | |
920 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
921 | .matches = has_cortex_a76_erratum_1463225, | |
a9e821b8 | 922 | .midr_range_list = erratum_1463225, |
969f5ea6 | 923 | }, |
93916beb MZ |
924 | #endif |
925 | #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219 | |
926 | { | |
927 | .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)", | |
928 | .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM, | |
929 | ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), | |
930 | .matches = needs_tx2_tvm_workaround, | |
931 | }, | |
9405447e MZ |
932 | { |
933 | .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)", | |
934 | .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM, | |
935 | ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), | |
936 | }, | |
6a036afb | 937 | #endif |
05460849 JM |
938 | #ifdef CONFIG_ARM64_ERRATUM_1542419 |
939 | { | |
940 | /* we depend on the firmware portion for correctness */ | |
941 | .desc = "ARM erratum 1542419 (kernel portion)", | |
942 | .capability = ARM64_WORKAROUND_1542419, | |
943 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
944 | .matches = has_neoverse_n1_erratum_1542419, | |
945 | .cpu_enable = cpu_enable_trap_ctr_access, | |
946 | }, | |
d9ff80f8 | 947 | #endif |
5afaa1fc | 948 | { |
301bcfac | 949 | } |
e116a375 | 950 | }; |
3891ebcc | 951 | |
d2532e27 JL |
952 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, |
953 | char *buf) | |
954 | { | |
c118bbb5 AP |
955 | switch (get_spectre_v2_workaround_state()) { |
956 | case ARM64_BP_HARDEN_NOT_REQUIRED: | |
d2532e27 | 957 | return sprintf(buf, "Not affected\n"); |
c118bbb5 | 958 | case ARM64_BP_HARDEN_WA_NEEDED: |
d2532e27 | 959 | return sprintf(buf, "Mitigation: Branch predictor hardening\n"); |
c118bbb5 AP |
960 | case ARM64_BP_HARDEN_UNKNOWN: |
961 | default: | |
962 | return sprintf(buf, "Vulnerable\n"); | |
963 | } | |
d2532e27 | 964 | } |
526e065d JL |
965 | |
966 | ssize_t cpu_show_spec_store_bypass(struct device *dev, | |
967 | struct device_attribute *attr, char *buf) | |
968 | { | |
969 | if (__ssb_safe) | |
970 | return sprintf(buf, "Not affected\n"); | |
971 | ||
972 | switch (ssbd_state) { | |
973 | case ARM64_SSBD_KERNEL: | |
974 | case ARM64_SSBD_FORCE_ENABLE: | |
6e5f0927 | 975 | return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n"); |
526e065d JL |
976 | } |
977 | ||
978 | return sprintf(buf, "Vulnerable\n"); | |
979 | } |