// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static u64 target_impl_cpu_num;
static struct target_impl_cpu *target_impl_cpus;

bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
{
	if (target_impl_cpu_num || !num || !impl_cpus)
		return false;

	target_impl_cpu_num = num;
	target_impl_cpus = impl_cpus;
	return true;
}
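
/*
 * Usage sketch (illustrative only; nothing in this file calls it this way):
 * a caller that has learnt, e.g. from firmware or the hypervisor, the set of
 * CPU implementations a vCPU may ever run on can register that list once,
 * before any MIDR matches are evaluated. The table and field values below
 * are hypothetical.
 *
 *	static struct target_impl_cpu example_impls[] = {
 *		{ .midr = 0x411fd0c0, .revidr = 0x0 },	// hypothetical values
 *	};
 *
 *	if (!cpu_errata_set_target_impl(ARRAY_SIZE(example_impls),
 *					example_impls))
 *		pr_warn("target implementation list already set\n");
 *
 * Note that a second registration fails by design: the list is fixed for
 * the lifetime of the system once set.
 */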

static inline bool is_midr_in_range(struct midr_range const *range)
{
	int i;

	if (!target_impl_cpu_num)
		return midr_is_cpu_model_range(read_cpuid_id(), range->model,
					       range->rv_min, range->rv_max);

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
					    range->model,
					    range->rv_min, range->rv_max))
			return true;
	}
	return false;
}

bool is_midr_in_range_list(struct midr_range const *ranges)
{
	while (ranges->model)
		if (is_midr_in_range(ranges++))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(is_midr_in_range_list);
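
/*
 * Usage sketch (illustrative): callers hand is_midr_in_range_list() an array
 * of ranges terminated by an empty entry (model == 0), typically built with
 * the MIDR_*() helpers from <asm/cputype.h>. The list below is hypothetical.
 *
 *	static const struct midr_range example_cpus[] = {
 *		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 *		MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),	// r0p0 to r3p1
 *		{},
 *	};
 *
 *	bool affected = is_midr_in_range_list(example_cpus);
 */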

static bool __maybe_unused
__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
			 u32 midr, u32 revidr)
{
	const struct arm64_midr_revidr *fix;

	if (!is_midr_in_range(&entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;
	return true;
}

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	int i;

	if (!target_impl_cpu_num) {
		WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
		return __is_affected_midr_range(entry, read_cpuid_id(),
						read_cpuid(REVIDR_EL1));
	}

	for (i = 0; i < target_impl_cpu_num; i++) {
		if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
					     target_impl_cpus[i].revidr))
			return true;
	}
	return false;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
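
/*
 * Worked example for the mask above (hypothetical MIDR value): MIDR_EL1
 * holds implementer[31:24], variant[23:20], architecture[19:16],
 * partnum[15:4] and revision[3:0]. (0xf00 << MIDR_PARTNUM_SHIFT) keeps only
 * the top nibble of the part number, so a hypothetical Kryo MIDR of
 * 0x51df8040 masks down to
 *
 *	0x51df8040 & (0xff000000 | 0xf000 | 0xf0000) == 0x510f8000
 *
 * i.e. every Kryo part sharing that part-number nibble compares equal to
 * entry->midr_range.model, regardless of variant or revision.
 */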

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
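
/*
 * Decision sketch (editorial restatement of the return expression above):
 * a CPU is flagged as mismatched only when *neither* view of its cache type
 * agrees with the sanitised system value:
 *
 *	ctr_raw == sys				-> not mismatched
 *	ctr_real == sys				-> not mismatched (IDC covers it)
 *	ctr_raw != sys && ctr_real != sys	-> mismatched; EL0 reads of
 *						   CTR_EL0 get trapped below
 */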

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
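
/*
 * Expansion sketch (illustrative): an arm64_errata[] entry written with the
 * helpers above, e.g.
 *
 *	{
 *		.desc = "Example erratum",		// hypothetical
 *		.capability = ARM64_WORKAROUND_EXAMPLE,	// hypothetical cap
 *		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
 *	},
 *
 * expands into designated initialisers for .type, .matches and .midr_range,
 * matching Cortex-A53 r0p0..r0p4 with local-CPU scope.
 */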

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}
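
/*
 * Note on the loop above (editorial): on these parts, MPIDR_EL1 Aff0
 * enumerates hardware threads within a core, so finding any CPU with
 * Aff0 != 0 means SMT is enabled and the TVM workaround is needed; a
 * thread-per-core configuration (Aff0 == 0 everywhere) is not affected.
 */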

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(&range) && has_dic;
}

static const struct midr_range impdef_pmuv3_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
	{},
};

static bool has_impdef_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
	unsigned int pmuver;

	if (!is_kernel_in_hyp_mode())
		return false;

	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
	if (pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return false;

	return is_midr_in_range_list(impdef_pmuv3_cpus);
}
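
/*
 * Worked example (illustrative): cpuid_feature_extract_unsigned_field()
 * returns the 4-bit ID register field at the given shift, so for PMUVer
 * (bits [11:8] of ID_AA64DFR0_EL1) the extraction above is equivalent to
 *
 *	pmuver = (dfr0 >> 8) & 0xf;	// 0xf == IMP_DEF encoding
 *
 * and the match only succeeds for the IMPLEMENTATION DEFINED PMU encoding.
 */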

static void cpu_enable_impdef_pmuv3_traps(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set_s(SYS_HACR_EL2, 0, BIT(56));
}

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
static const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01]: ARM erratum 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif
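
/*
 * Note on MIDR_FIXED above (editorial): it records revisions where the
 * erratum is fixed in hardware and advertised via REVIDR_EL1. Here, a
 * Cortex-A53 at r0p4 with REVIDR_EL1 bit 8 set carries the fix for erratum
 * 843419, so __is_affected_midr_range() reports such a part as unaffected
 * even though its MIDR falls inside the matching range.
 */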

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex-A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex-A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3117295
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
	/* Cortex-A520 r0p0 to r0p1 */
	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
	{}
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
static const struct midr_range erratum_ac03_cpu_38_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif

#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
static const struct midr_range erratum_ac04_cpu_23_list[] = {
	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
	{},
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		/* NVIDIA Carmel */
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum workaround is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2645198
	{
		.desc = "ARM erratum 2645198",
		.capability = ARM64_WORKAROUND_2645198,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0 - r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1, 1), BIT(25)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_3194386
	{
		.desc = "SSBS not fully self-synchronizing",
		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM errata 2966298, 3117295",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* affected ranges are commented in the list itself */
		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_RANGE_LIST(erratum_ac03_cpu_38_list),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC04_CPU_23
	{
		.desc = "AmpereOne erratum AC04_CPU_23",
		.capability = ARM64_WORKAROUND_AMPERE_AC04_CPU_23,
		ERRATA_MIDR_RANGE_LIST(erratum_ac04_cpu_23_list),
	},
#endif
	{
		.desc = "Broken CNTVOFF_EL2",
		.capability = ARM64_WORKAROUND_QCOM_ORYON_CNTVOFF,
		ERRATA_MIDR_RANGE_LIST(((const struct midr_range[]) {
					MIDR_ALL_VERSIONS(MIDR_QCOM_ORYON_X1),
					{}
				})),
	},
	{
		.desc = "Apple IMPDEF PMUv3 Traps",
		.capability = ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_impdef_pmuv3,
		.cpu_enable = cpu_enable_impdef_pmuv3_traps,
	},
	{
	}
};
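
/*
 * Consumption note (editorial, broad strokes): this table is walked by the
 * cpucap machinery in cpufeature.c, which calls each entry's ->matches() at
 * the scope encoded in ->type and, for capabilities that are detected,
 * invokes ->cpu_enable() on each CPU as it comes online. Exact call sites
 * vary between kernel versions.
 */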