// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static u64 target_impl_cpu_num;
static struct target_impl_cpu *target_impl_cpus;

bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
{
	if (target_impl_cpu_num || !num || !impl_cpus)
		return false;

	target_impl_cpu_num = num;
	target_impl_cpus = impl_cpus;
	return true;
}

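/*
 * Illustrative sketch only: a hypothetical early-boot caller that has
 * discovered (for example, from a hypervisor interface) the set of CPU
 * implementations the system may run on could register them once, so
 * that the errata matching below considers every target implementation
 * rather than just the CPU the kernel happens to be booting on. The
 * array and MIDR values are made up; only the registration call is real.
 *
 *	static struct target_impl_cpu example_impl_cpus[] = {
 *		{ .midr = 0x410fd0b0 },
 *		{ .midr = 0x410fd490 },
 *	};
 *
 *	if (!cpu_errata_set_target_impl(ARRAY_SIZE(example_impl_cpus),
 *					example_impl_cpus))
 *		pr_warn("target implementation CPUs already set\n");
 */
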
static inline bool is_midr_in_range(struct midr_range const *range)
{
	int i;

	if (!target_impl_cpu_num)
		return midr_is_cpu_model_range(read_cpuid_id(), range->model,
					       range->rv_min, range->rv_max);

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
					    range->model,
					    range->rv_min, range->rv_max))
			return true;
	}
	return false;
}

bool is_midr_in_range_list(struct midr_range const *ranges)
{
	while (ranges->model)
		if (is_midr_in_range(ranges++))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(is_midr_in_range_list);

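/*
 * Illustrative sketch only: a typical user of is_midr_in_range_list()
 * builds an array of midr_range entries terminated by an empty element
 * and asks whether any CPU of interest falls within it. The list name
 * and the models chosen below are hypothetical.
 *
 *	static const struct midr_range example_affected_list[] = {
 *		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 *		MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
 *		{},
 *	};
 *
 *	if (is_midr_in_range_list(example_affected_list))
 *		... apply the workaround ...
 */
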
static bool __maybe_unused
__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
			 u32 midr, u32 revidr)
{
	const struct arm64_midr_revidr *fix;

	if (!is_midr_in_range(&entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;
	return true;
}

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	int i;

	if (!target_impl_cpu_num) {
		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
		return __is_affected_midr_range(entry, read_cpuid_id(),
						read_cpuid(REVIDR_EL1));
	}

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
					     target_impl_cpus[i].revidr))
			return true;
	}
	return false;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	/*
	 * Keep only the implementer, the architecture field and the top
	 * nibble of the part number; variant, revision and the low
	 * part-number nibbles are ignored so that related Kryo parts all
	 * compare equal against the entry's model.
	 */
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU
	 *    anyway reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

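/*
 * Worked example for has_mismatched_cache_type() above (illustrative,
 * assuming the usual IDC case): if the sanitised system copy of CTR_EL0
 * has IDC set, a late CPU whose raw CTR_EL0 reports IDC == 0 but whose
 * cache topology still lets read_cpuid_effective_cachetype() report an
 * effective IDC == 1 is not flagged, because the effective value matches
 * the system copy; only a CPU where both the raw and the effective
 * values differ from the system copy is treated as having a mismatched
 * cache type.
 */
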
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

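/*
 * Illustrative expansion only: an entry written with the helpers above,
 * e.g.
 *
 *	ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4)
 *
 * expands (via ERRATA_MIDR_RANGE and CAP_MIDR_RANGE) to the designated
 * initialisers
 *
 *	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *	.matches = is_affected_midr_range,
 *	.midr_range = MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4)
 *
 * i.e. a local-CPU erratum matched on Cortex-A53 r0p0 to r0p4.
 */
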
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(&range) && has_dic;
}

static const struct midr_range impdef_pmuv3_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
	{},
};

static bool has_impdef_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
	unsigned int pmuver;

	if (!is_kernel_in_hyp_mode())
		return false;

	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
	if (pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return false;

	return is_midr_in_range_list(impdef_pmuv3_cpus);
}

static void cpu_enable_impdef_pmuv3_traps(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set_s(SYS_HACR_EL2, 0, BIT(56));
}

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
static const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM erratum 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

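/*
 * Worked example for the MIDR_FIXED() annotation above: the first
 * erratum 843419 entry matches Cortex-A53 r0p0 to r0p4, but the
 * MIDR_FIXED(0x4, BIT(8)) pairing makes __is_affected_midr_range()
 * return false for an r0p4 part whose REVIDR has bit 8 set, i.e. such
 * a CPU is treated as already fixed and is not reported as affected.
 */
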
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif	/* CONFIG_ARM64_ERRATUM_1742098 */

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	{}
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
static const struct midr_range erratum_ac03_cpu_38_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
static const struct midr_range erratum_ac04_cpu_23_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

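/*
 * arm64_errata[] below is the table walked by the cpufeature code to
 * detect and apply these workarounds. Illustrative template only (the
 * erratum number, Kconfig symbol and callback name are hypothetical);
 * a typical new entry takes the form:
 *
 *	#ifdef CONFIG_ARM64_ERRATUM_XXXXXXX
 *	{
 *		.desc = "ARM erratum XXXXXXX",
 *		.capability = ARM64_WORKAROUND_XXXXXXX,
 *		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A55, 0, 0, 1),
 *		.cpu_enable = example_enable_workaround,
 *	},
 *	#endif
 */
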
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum workaround is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "SSBS not fully self-synchronizing",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
	{
		.desc = "AmpereOne erratum AC04_CPU_23",
		.capability = ARM64_WORKAROUND_AMPERE_AC04_CPU_23,
		ERRATA_MIDR_RANGE_LIST(erratum_ac04_cpu_23_list),
	},
#endif
	{
		.desc = "Broken CNTVOFF_EL2",
		.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
		ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) {
			MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1),
			{}
		})),
	},
	{
		.desc = "Apple IMPDEF PMUv3 Traps",
		.capability = ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_impdef_pmuv3,
		.cpu_enable = cpu_enable_impdef_pmuv3_traps,
	},
	{
	}
};