/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

/*
 * Match callback: true if the local CPU's MIDR falls inside the entry's
 * affected variant/revision range AND the erratum is not already fixed in
 * this particular silicon revision (as advertised by a REVIDR_EL1 bit
 * listed in entry->fixed_revs).
 *
 * Must be called on the CPU being checked, with preemption disabled
 * (SCOPE_LOCAL_CPU), since it reads this CPU's ID registers directly.
 */
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	/* Keep only variant+revision for comparison against fixed_revs. */
	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	/* fixed_revs is optional; the list is terminated by a zero mask. */
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;	/* erratum fixed in this revision */

	return true;
}

be5b2998 SP |
45 | static bool __maybe_unused |
46 | is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry, | |
47 | int scope) | |
301bcfac | 48 | { |
92406f0c | 49 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
be5b2998 | 50 | return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list); |
301bcfac AP |
51 | } |
52 | ||
bb487118 SB |
53 | static bool __maybe_unused |
54 | is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) | |
55 | { | |
56 | u32 model; | |
57 | ||
58 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | |
59 | ||
60 | model = read_cpuid_id(); | |
61 | model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | | |
62 | MIDR_ARCHITECTURE_MASK; | |
63 | ||
1df31050 | 64 | return model == entry->midr_range.model; |
bb487118 SB |
65 | } |
66 | ||
/*
 * Match callback: true if this CPU's CTR_EL0 — both the raw value and the
 * effective value — differs from the system-wide sanitised copy in the
 * strictly-checked fields, in which case CTR_EL0 accesses must be trapped
 * on this CPU (see cpu_enable_trap_ctr_access()).
 */
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

/*
 * Enable callback for ARM64_MISMATCHED_CACHE_TYPE: clear SCTLR_EL1.UCT so
 * that EL0 reads of CTR_EL0 trap to the kernel, which can then emulate a
 * consistent value.
 */
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

/*
 * Index of the last EL2 vector slot handed out; -1 means none allocated
 * yet.  Bumped atomically by users allocating hardened vector slots.
 */
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

/*
 * Populate EL2 vector slot @slot: replicate the workaround sequence
 * [hyp_vecs_start, hyp_vecs_end) into each 0x80-byte vector entry of the
 * 2K slot, then flush the I-cache so the new vectors are visible.
 */
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

/*
 * Record the branch-predictor hardening callback for this CPU and point it
 * at a hyp vector slot containing the matching workaround sequence.
 *
 * Slots are shared: if any CPU already uses @fn, its slot is reused;
 * otherwise a fresh slot is allocated from arm64_el2_vector_last_slot and
 * populated.  bp_lock serialises the scan-then-allocate sequence.
 */
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	/* Reuse an existing slot if some CPU already installed this fn. */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}

#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

/*
 * !CONFIG_KVM_INDIRECT_VECTORS variant: no hyp vector slots to patch,
 * just record the per-CPU hardening callback.
 */
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}

#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

/*
 * Install the hardening callback on the local CPU if the capability entry
 * matches it.  CPUs that advertise CSV2 in ID_AA64PFR0_EL1 are immune to
 * branch-predictor aliasing attacks, so no callback is installed there.
 */
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

/* Invalidate the branch predictor via an SMCCC 1.1 SMC call. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Invalidate the branch predictor via an SMCCC 1.1 HVC call. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/*
 * Falkor-specific mitigation: scrub the return-address predictor by
 * executing 16 nested branch-and-link instructions, then restore the
 * original link register.  "bl . + 4" branches to the next instruction,
 * pushing an entry onto the link stack each time.
 */
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

c0cda3b8 DM |
209 | static void |
210 | enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) | |
b092201e MZ |
211 | { |
212 | bp_hardening_cb_t cb; | |
213 | void *smccc_start, *smccc_end; | |
214 | struct arm_smccc_res res; | |
4bc352ff | 215 | u32 midr = read_cpuid_id(); |
b092201e MZ |
216 | |
217 | if (!entry->matches(entry, SCOPE_LOCAL_CPU)) | |
c0cda3b8 | 218 | return; |
b092201e MZ |
219 | |
220 | if (psci_ops.smccc_version == SMCCC_VERSION_1_0) | |
c0cda3b8 | 221 | return; |
b092201e MZ |
222 | |
223 | switch (psci_ops.conduit) { | |
224 | case PSCI_CONDUIT_HVC: | |
225 | arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, | |
226 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | |
e21da1c9 | 227 | if ((int)res.a0 < 0) |
c0cda3b8 | 228 | return; |
b092201e | 229 | cb = call_hvc_arch_workaround_1; |
22765f30 MZ |
230 | /* This is a guest, no need to patch KVM vectors */ |
231 | smccc_start = NULL; | |
232 | smccc_end = NULL; | |
b092201e MZ |
233 | break; |
234 | ||
235 | case PSCI_CONDUIT_SMC: | |
236 | arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, | |
237 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | |
e21da1c9 | 238 | if ((int)res.a0 < 0) |
c0cda3b8 | 239 | return; |
b092201e MZ |
240 | cb = call_smc_arch_workaround_1; |
241 | smccc_start = __smccc_workaround_1_smc_start; | |
242 | smccc_end = __smccc_workaround_1_smc_end; | |
243 | break; | |
244 | ||
245 | default: | |
c0cda3b8 | 246 | return; |
b092201e MZ |
247 | } |
248 | ||
4bc352ff SD |
249 | if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || |
250 | ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) | |
251 | cb = qcom_link_stack_sanitization; | |
252 | ||
b092201e MZ |
253 | install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); |
254 | ||
c0cda3b8 | 255 | return; |
aa6acde6 | 256 | } |
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
/* Non-zero when this CPU must flip SSBD state around kernel entry/exit. */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* Global SSBD policy; defaults to dynamic (kernel-controlled) mitigation. */
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

/* Mapping of "ssbd=" command-line values to policy states. */
static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

/*
 * Parse the "ssbd=" early parameter and set the global SSBD policy.
 * Returns 0 on a recognised option, -EINVAL otherwise.
 *
 * NOTE(review): strncmp() with the option's length performs a prefix
 * match, so e.g. "ssbd=force-offx" is accepted as "force-off" — presumably
 * intentional leniency; confirm before tightening.
 */
static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

/*
 * Alternative-instruction callback: patch the single placeholder
 * instruction with an HVC or SMC depending on the PSCI conduit, so the
 * SSBD firmware call uses the right trap instruction.  Leaves the
 * placeholder untouched when no conduit is available.
 */
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

/*
 * Alternative-instruction callback: NOP out the branch that skips
 * ARCH_WORKAROUND_2 handling, but only when the SSBD state is dynamic
 * (ARM64_SSBD_KERNEL) and may therefore be flipped at runtime.
 */
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Turn the SSBD mitigation on or off on the local CPU.  Prefers the
 * architected PSTATE.SSBS control when this CPU has it (note SSBS has
 * inverted polarity: SSBS=0 means speculation disabled); otherwise falls
 * back to the ARCH_WORKAROUND_2 firmware call over the PSCI conduit.
 */
void arm64_set_ssbd_mitigation(bool state)
{
	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

/*
 * Match callback for ARM64_SSBD: decide whether this CPU needs the
 * Speculative Store Bypass Disable mitigation, apply the configured
 * policy as a side effect, and update the global ssbd_state.
 *
 * Order of checks:
 *  1. CPUs with the SSBS capability need no dynamic mitigation.
 *  2. Without SMCCC 1.1 the firmware cannot be queried -> UNKNOWN.
 *  3. Query ARCH_WORKAROUND_2 support via the PSCI conduit and map the
 *     firmware answer onto required/not-required/mitigated/unknown.
 *  4. Apply the command-line policy (force-on/force-off/kernel).
 *
 * Returns true when the capability (and hence the entry/exit callback
 * machinery) is needed on this CPU.
 */
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (this_cpu_has_cap(ARM64_SSBS)) {
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;

	/* Firmware says the whole system is immune. */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		return false;
	}

	/* Reconcile the firmware answer with the command-line policy. */
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */

/*
 * Enable callback for ARM64_WORKAROUND_CLEAN_CACHE: clear SCTLR_EL1.UCI so
 * EL0 cache maintenance instructions trap and can be emulated safely.
 */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

/*
 * Helper macros for building arm64_cpu_capabilities entries that match on
 * MIDR values.  The CAP_* variants set .matches/.midr_range*; the ERRATA_*
 * variants additionally mark the entry as a local-CPU erratum.
 */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* Attach a one-entry fixed_revs list (terminated by a zeroed element). */
#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given a model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

ba7d9233 SP |
496 | /* |
497 | * Generic helper for handling capabilties with multiple (match,enable) pairs | |
498 | * of call backs, sharing the same capability bit. | |
499 | * Iterate over each entry to see if at least one matches. | |
500 | */ | |
12eb3691 WD |
501 | static bool __maybe_unused |
502 | multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope) | |
ba7d9233 SP |
503 | { |
504 | const struct arm64_cpu_capabilities *caps; | |
505 | ||
506 | for (caps = entry->match_list; caps->matches; caps++) | |
507 | if (caps->matches(caps, scope)) | |
508 | return true; | |
509 | ||
510 | return false; | |
511 | } | |
512 | ||
513 | /* | |
514 | * Take appropriate action for all matching entries in the shared capability | |
515 | * entry. | |
516 | */ | |
12eb3691 | 517 | static void __maybe_unused |
ba7d9233 SP |
518 | multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry) |
519 | { | |
520 | const struct arm64_cpu_capabilities *caps; | |
301bcfac | 521 | |
ba7d9233 SP |
522 | for (caps = entry->match_list; caps->matches; caps++) |
523 | if (caps->matches(caps, SCOPE_LOCAL_CPU) && | |
524 | caps->cpu_enable) | |
525 | caps->cpu_enable(caps); | |
526 | } | |
527 | ||
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
	{},
};

#endif

#ifdef CONFIG_HARDEN_EL2_VECTORS

/* CPUs whose EL2 vectors need hardening against speculative attacks. */
static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

/*
 * Master table of CPU erratum workarounds, consumed by the cpufeature
 * framework.  Each entry pairs a capability bit with a match callback
 * (typically a MIDR range) and an optional cpu_enable action.  The table
 * is terminated by an empty entry.
 */
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* r0p4 parts with REVIDR bit 8 set already carry the fix. */
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		/* Unconditional entry: checked on every system. */
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		/* Kryo needs a fuzzier MIDR match; see is_kryo_midr(). */
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.cpu_enable = enable_smccc_arch_workaround_1,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
	},
#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1188873",
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
	{
	}
};