/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/sort.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
	(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
	 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
	 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
	 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
	 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
	 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{						\
		.sign = SIGNED,				\
		.strict = STRICT,			\
		.type = TYPE,				\
		.shift = SHIFT,				\
		.width = WIDTH,				\
		.safe_val = SAFE_VAL,			\
	}

/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END					\
	{						\
		.width = 0,				\
	}
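
/*
 * For illustration: an entry such as
 *
 *	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0)
 *
 * describes a strict, unsigned, 4-bit field at bit 20. FTR_LOWER_SAFE
 * means the sanitised system-wide value is the lowest value seen on
 * any CPU, while a FTR_EXACT field collapses to .safe_val on mismatch.
 * A mismatch in any FTR_STRICT field additionally taints the kernel.
 */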

/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);

static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),	/* RAZ */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about EL3 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are
	 * mapped within the minimum PARange of all CPUs
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0xf),	/* InnerShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* ShareLvl */
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0xf),	/* OuterShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* PMSA */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* VMSA */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* FPMisc */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* SIMDMisc */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};
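
/*
 * Note (illustrative): DCZID_EL0.BS is the log2 of the DC ZVA block
 * size in words, e.g. BS = 4 is a 64-byte block. FTR_LOWER_SAFE above
 * makes the system advertise the smallest block size of any CPU, which
 * is always safe for userspace to use.
 */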

static struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* ac2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* RAZ */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* State3 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),		/* State2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* State1 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* State0 */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_generic[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_generic32[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_aa64raz[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table)		\
	{					\
		.sys_id = id,			\
		.name = #id,			\
		.ftr_bits = &((table)[0]),	\
	}

static struct arm64_ftr_reg arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
};

static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct arm64_ftr_reg *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *	   - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	return bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
}
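
/*
 * For illustration: get_arm64_ftr_reg(SYS_CTR_EL0) returns the
 * arm64_ftr_regs[] entry that carries ftr_ctr above, while an id
 * without an entry in the array yields NULL.
 */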

static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}

static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}
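
/*
 * Worked example (illustrative): for a FTR_LOWER_SAFE field,
 * arm64_ftr_safe_value(ftrp, 1, 2) returns 1, so a CPU reporting a
 * lower capability level drags the system-wide value down. A FTR_EXACT
 * field ignores both values and falls back to ftrp->safe_val.
 */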

static int __init sort_cmp_ftr_regs(const void *a, const void *b)
{
	return ((const struct arm64_ftr_reg *)a)->sys_id -
		((const struct arm64_ftr_reg *)b)->sys_id;
}

static void __init swap_ftr_regs(void *a, void *b, int size)
{
	struct arm64_ftr_reg tmp = *(struct arm64_ftr_reg *)a;
	*(struct arm64_ftr_reg *)a = *(struct arm64_ftr_reg *)b;
	*(struct arm64_ftr_reg *)b = tmp;
}

static void __init sort_ftr_regs(void)
{
	/* Keep the array sorted so that we can do the binary search */
	sort(arm64_ftr_regs,
		ARRAY_SIZE(arm64_ftr_regs),
		sizeof(arm64_ftr_regs[0]),
		sort_cmp_ftr_regs,
		swap_ftr_regs);
}

/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);
		if (!ftrp->strict)
			strict_mask &= ~arm64_ftr_mask(ftrp);
	}
	reg->sys_val = val;
	reg->strict_mask = strict_mask;
}
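
/*
 * Note (illustrative): strict_mask starts as all ones and has the bits
 * of every FTR_NONSTRICT field cleared. Since PARange in
 * ID_AA64MMFR0_EL1 is non-strict, for example, a PARange mismatch
 * between CPUs is sanitised silently instead of triggering the
 * SANITY CHECK warning below.
 */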

void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the table, make sure it is sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}

static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
}

static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}
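
/*
 * Worked example (illustrative): CTR_EL0.DminLine is strict and
 * FTR_LOWER_SAFE. If the boot CPU reports 4 and a secondary reports 3,
 * the sanitised value becomes 3, but the strict mismatch still makes
 * this function return 1 and the caller taints the kernel.
 */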

/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	/*
	 * If we have AArch32, we care about 32-bit features for compat. These
	 * registers should be RES0 otherwise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
				      info->reg_id_dfr0, boot->reg_id_dfr0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
				      info->reg_id_isar0, boot->reg_id_isar0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
				      info->reg_id_isar1, boot->reg_id_isar1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
				      info->reg_id_isar2, boot->reg_id_isar2);
	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
				      info->reg_id_isar3, boot->reg_id_isar3);
	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
				      info->reg_id_isar4, boot->reg_id_isar4);
	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
				      info->reg_id_isar5, boot->reg_id_isar5);

	/*
	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
	 * ACTLR formats could differ across CPUs and therefore would have to
	 * be trapped for virtualization anyway.
	 */
	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
				      info->reg_id_pfr0, boot->reg_id_pfr0);
	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
				      info->reg_id_pfr1, boot->reg_id_pfr1);
	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
				      info->reg_mvfr0, boot->reg_mvfr0);
	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
				      info->reg_mvfr1, boot->reg_mvfr1);
	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
				      info->reg_mvfr2, boot->reg_mvfr2);

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.\n");
}

u64 read_system_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}
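
/*
 * For illustration: read_system_reg(SYS_ID_AA64ISAR0_EL1) returns the
 * sanitised, system-wide view of the register rather than the raw
 * value of the calling CPU, so decisions based on it hold on every CPU.
 */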

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);

	return val >= entry->min_field_value;
}
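
/*
 * Worked example (illustrative): the FP field of ID_AA64PFR0_EL1 is
 * signed, so a raw field value of 0xf extracts as -1 (FP not
 * implemented) and fails a min_field_value of 0, while 0x0
 * (FP implemented) passes.
 */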

static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
{
	u64 val;

	val = read_system_reg(entry->sys_reg);
	return feature_matches(val, entry);
}

static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
{
	bool has_sre;

	if (!has_cpuid_feature(entry))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
{
	u32 midr = read_cpuid_id();
	u32 rv_min, rv_max;

	/* Cavium ThunderX pass 1.x and 2.x, i.e. variant <= 1, any revision */
	rv_min = 0;
	rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;

	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
}

static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
{
	return is_kernel_in_hyp_mode();
}

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
		.enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 2,
	},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
	{
		.desc = "Software prefetching using PRFM",
		.capability = ARM64_HAS_NO_HW_PREFETCH,
		.matches = has_no_hw_prefetch,
	},
#ifdef CONFIG_ARM64_UAO
	{
		.desc = "User Access Override",
		.capability = ARM64_HAS_UAO,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
		.min_field_value = 1,
		.enable = cpu_enable_uao,
	},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
	{
		.capability = ARM64_ALT_PAN_NOT_UAO,
		.matches = cpufeature_pan_not_uao,
	},
#endif /* CONFIG_ARM64_PAN */
	{
		.desc = "Virtualization Host Extensions",
		.capability = ARM64_HAS_VIRT_HOST_EXTN,
		.matches = runs_at_el2,
	},
	{},
};

#define HWCAP_CAP(reg, field, s, min_value, type, cap)	\
	{						\
		.desc = #cap,				\
		.matches = has_cpuid_feature,		\
		.sys_reg = reg,				\
		.field_pos = field,			\
		.sign = s,				\
		.min_field_value = min_value,		\
		.hwcap_type = type,			\
		.hwcap = cap,				\
	}
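
/*
 * For illustration: the first entry below sets HWCAP_PMULL when the
 * unsigned AES field of ID_AA64ISAR0_EL1 is at least 2 (AES plus
 * PMULL), while the second sets HWCAP_AES for a field value of at
 * least 1 (plain AES).
 */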

static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};

static void __init cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		elf_hwcap |= cap->hwcap;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		WARN_ON(1);
		break;
	}
}

/* Check if we have a particular HWCAP enabled */
static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(1);
		rc = false;
	}

	return rc;
}

static void __init setup_cpu_hwcaps(void)
{
	int i;
	const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;

	for (i = 0; hwcaps[i].matches; i++)
		if (hwcaps[i].matches(&hwcaps[i]))
			cap_set_hwcap(&hwcaps[i]);
}

void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			     const char *info)
{
	int i;

	for (i = 0; caps[i].matches; i++) {
		if (!caps[i].matches(&caps[i]))
			continue;

		if (!cpus_have_cap(caps[i].capability) && caps[i].desc)
			pr_info("%s %s\n", info, caps[i].desc);
		cpus_set_cap(caps[i].capability);
	}
}

/*
 * Run through the enabled capabilities and call each capability's
 * enable() method on all active CPUs.
 */
static void __init
enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	int i;

	for (i = 0; caps[i].matches; i++)
		if (caps[i].enable && cpus_have_cap(caps[i].capability))
			on_each_cpu(caps[i].enable, NULL, true);
}

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier.
 */
static bool sys_caps_initialised;

static inline void set_sys_caps_initialised(void)
{
	sys_caps_initialised = true;
}

/*
 * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
 */
static u64 __raw_read_system_reg(u32 sys_id)
{
	switch (sys_id) {
	case SYS_ID_PFR0_EL1:		return read_cpuid(ID_PFR0_EL1);
	case SYS_ID_PFR1_EL1:		return read_cpuid(ID_PFR1_EL1);
	case SYS_ID_DFR0_EL1:		return read_cpuid(ID_DFR0_EL1);
	case SYS_ID_MMFR0_EL1:		return read_cpuid(ID_MMFR0_EL1);
	case SYS_ID_MMFR1_EL1:		return read_cpuid(ID_MMFR1_EL1);
	case SYS_ID_MMFR2_EL1:		return read_cpuid(ID_MMFR2_EL1);
	case SYS_ID_MMFR3_EL1:		return read_cpuid(ID_MMFR3_EL1);
	case SYS_ID_ISAR0_EL1:		return read_cpuid(ID_ISAR0_EL1);
	case SYS_ID_ISAR1_EL1:		return read_cpuid(ID_ISAR1_EL1);
	case SYS_ID_ISAR2_EL1:		return read_cpuid(ID_ISAR2_EL1);
	case SYS_ID_ISAR3_EL1:		return read_cpuid(ID_ISAR3_EL1);
	case SYS_ID_ISAR4_EL1:		return read_cpuid(ID_ISAR4_EL1);
	case SYS_ID_ISAR5_EL1:		return read_cpuid(ID_ISAR5_EL1);
	case SYS_MVFR0_EL1:		return read_cpuid(MVFR0_EL1);
	case SYS_MVFR1_EL1:		return read_cpuid(MVFR1_EL1);
	case SYS_MVFR2_EL1:		return read_cpuid(MVFR2_EL1);

	case SYS_ID_AA64PFR0_EL1:	return read_cpuid(ID_AA64PFR0_EL1);
	case SYS_ID_AA64PFR1_EL1:	return read_cpuid(ID_AA64PFR1_EL1);
	case SYS_ID_AA64DFR0_EL1:	return read_cpuid(ID_AA64DFR0_EL1);
	case SYS_ID_AA64DFR1_EL1:	return read_cpuid(ID_AA64DFR1_EL1);
	case SYS_ID_AA64MMFR0_EL1:	return read_cpuid(ID_AA64MMFR0_EL1);
	case SYS_ID_AA64MMFR1_EL1:	return read_cpuid(ID_AA64MMFR1_EL1);
	case SYS_ID_AA64MMFR2_EL1:	return read_cpuid(ID_AA64MMFR2_EL1);
	case SYS_ID_AA64ISAR0_EL1:	return read_cpuid(ID_AA64ISAR0_EL1);
	case SYS_ID_AA64ISAR1_EL1:	return read_cpuid(ID_AA64ISAR1_EL1);

	case SYS_CNTFRQ_EL0:		return read_cpuid(CNTFRQ_EL0);
	case SYS_CTR_EL0:		return read_cpuid(CTR_EL0);
	case SYS_DCZID_EL0:		return read_cpuid(DCZID_EL0);
	default:
		BUG();
		return 0;
	}
}

/*
 * Check for CPU features that are used in early boot
 * based on the Boot CPU value.
 */
static void check_early_cpu_features(void)
{
	verify_cpu_asid_bits();
}

/*
 * Run through the enabled system capabilities and call enable() for
 * each on this CPU. The capabilities were decided based on the
 * available CPUs at the boot time. Any new CPU should match the system
 * wide status of the capability. If the new CPU doesn't have a
 * capability which the system now has enabled, we cannot do anything
 * to fix it up and could cause unexpected failures. So we park the CPU.
 */
void verify_local_cpu_capabilities(void)
{
	int i;
	const struct arm64_cpu_capabilities *caps;

	check_early_cpu_features();

	/*
	 * If we haven't computed the system capabilities, there is nothing
	 * to verify.
	 */
	if (!sys_caps_initialised)
		return;

	caps = arm64_features;
	for (i = 0; caps[i].matches; i++) {
		if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
			continue;
		/*
		 * If the new CPU misses an advertised feature, we cannot
		 * proceed further, so park the CPU.
		 */
		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) {
			pr_crit("CPU%d: missing feature: %s\n",
				smp_processor_id(), caps[i].desc);
			cpu_die_early();
		}
		if (caps[i].enable)
			caps[i].enable(NULL);
	}

	for (i = 0, caps = arm64_hwcaps; caps[i].matches; i++) {
		if (!cpus_have_hwcap(&caps[i]))
			continue;
		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i])) {
			pr_crit("CPU%d: missing HWCAP: %s\n",
				smp_processor_id(), caps[i].desc);
			cpu_die_early();
		}
	}
}

static void __init setup_feature_capabilities(void)
{
	update_cpu_capabilities(arm64_features, "detected feature:");
	enable_cpu_capabilities(arm64_features);
}

void __init setup_cpu_features(void)
{
	u32 cwg;
	int cls;

	/* Set the CPU feature capabilities */
	setup_feature_capabilities();
	setup_cpu_hwcaps();

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);
}
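
/*
 * Note (illustrative, assuming the usual CTR_EL0 encoding): CWG is the
 * log2 of the maximum writeback granule in words, so cache_line_size()
 * reports 4 << CWG bytes, e.g. CWG = 4 gives a 64-byte granule. The
 * second warning above fires when the compile-time L1_CACHE_BYTES is
 * smaller than that.
 */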

/*
 * PAN-toggling alternatives only apply when the CPU has PAN but not
 * UAO; with UAO present, user accesses use unprivileged load/store
 * instructions instead, so PAN need not be toggled.
 */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
{
	return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
}