arm64: add sysfs vulnerability show for meltdown
arch/arm64/kernel/cpu_errata.c
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

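/*
 * Match a CPU against the erratum's MIDR range, then let the optional
 * MIDR_FIXED() (midr, revidr) list declare specific revisions as already
 * fixed and therefore not affected.
 */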
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

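/*
 * Qualcomm Kryo parts cannot be matched with a single MIDR range, so
 * compare only the implementer, architecture and part-number family
 * fields of MIDR_EL1 against the entry's model.
 */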
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

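/*
 * Report a CTR_EL0 mismatch: true when neither the raw nor the effective
 * CTR_EL0 of this CPU agrees with the system-wide (strict) value.
 */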
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

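/*
 * Index of the most recently allocated 2K slot in the hardened EL2
 * vector page; -1 until the first slot is handed out.
 */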
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

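/*
 * Copy the branch-predictor hardening sequence into every 0x80-byte
 * vector entry of the given 2K slot in the EL2 vector page, then make
 * the new code visible to the instruction stream.
 */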
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

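/*
 * Install the hardening callback for this CPU, reusing the EL2 vector
 * slot of any CPU that already runs the same callback and allocating a
 * new slot (patched with the SMCCC sequence) otherwise.
 */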
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
	 * start/end if we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

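/*
 * Only install the callback if this CPU is actually affected and does
 * not already advertise branch-predictor invulnerability via
 * ID_AA64PFR0_EL1.CSV2.
 */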
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

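/*
 * Falkor-specific Spectre-v2 mitigation: overwrite the branch
 * predictor's link stack with 16 dummy branch-and-link calls, then
 * restore the original link register.
 */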
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

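/* "nospectre_v2" on the kernel command line opts out of the hardening below. */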
static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

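/*
 * Detect, via the SMCCC ARCH_FEATURES call, whether firmware implements
 * ARCH_WORKAROUND_1 and install the matching HVC/SMC callback (or the
 * Falkor link-stack sequence) as this CPU's branch-predictor hardening.
 */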
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	if (__nospectre_v2) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		return;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_hvc_arch_workaround_1;
		/* This is a guest, no need to patch KVM vectors */
		smccc_start = NULL;
		smccc_end = NULL;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

	return;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
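/*
 * Per-CPU flag consulted by the EL1 entry/exit code: non-zero when this
 * CPU needs the ARCH_WORKAROUND_2 firmware call toggled around kernel
 * entry and exit.
 */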
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

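/*
 * Parse the "ssbd=" early parameter: "force-on", "force-off" or
 * "kernel" (apply the mitigation dynamically on kernel entry/exit).
 */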
static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

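/*
 * Alternatives callback: patch the single instruction covered by this
 * alternative with an HVC or SMC matching the firmware conduit in use,
 * so callers reach ARCH_WORKAROUND_2 through the correct conduit.
 */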
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

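/*
 * Flip the mitigation on or off for this CPU: use the SSBS PSTATE bit
 * when the CPU has it (SSBS == 0 means speculative store bypass is
 * disabled), otherwise ask firmware via ARCH_WORKAROUND_2.
 */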
void arm64_set_ssbd_mitigation(bool state)
{
	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

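/*
 * Per-CPU capability check: query firmware (ARCH_WORKAROUND_2) for SSBD
 * support, combine the answer with the "ssbd=" policy, and apply or
 * skip the mitigation accordingly. Returns true when this CPU needs
 * the workaround.
 */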
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				    int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (this_cpu_has_cap(ARM64_SSBS)) {
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;

	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

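/*
 * Helpers for building arm64_cpu_capabilities entries that match CPUs
 * by MIDR value: single ranges, lists of ranges, all revisions of a
 * model, and revisions already fixed according to REVIDR.
 */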
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
	{},
};

#endif

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI

static const struct midr_range arm64_repeat_tlbi_cpus[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
#endif
	{},
};

#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

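/*
 * The master list of erratum workarounds. Each entry names a capability
 * bit, how to detect affected CPUs (a MIDR range, range list or custom
 * match), and optionally a cpu_enable hook that applies the fix on each
 * affected CPU.
 */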
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.cpu_enable = enable_smccc_arch_workaround_1,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1188873",
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1165522",
		.capability = ARM64_WORKAROUND_1165522,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
	{
	}
};
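
/*
 * sysfs handler for /sys/devices/system/cpu/vulnerabilities/spectre_v1.
 * arm64 relies on __user pointer sanitization, so the mitigation is
 * reported unconditionally.
 */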
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}