/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);

#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

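/*
 * Small per-CPU stacks for the IRQ, abort, undef and FIQ exception
 * modes; three words per mode is enough for the register stash done
 * by the vector entry code before it switches to the SVC stack.
 */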
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
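/*
 * endian_test overlays "l??b" on an unsigned long: casting the long to
 * char yields its least significant byte, so ENDIANNESS evaluates to
 * 'l' on little-endian builds and 'b' on big-endian builds.
 */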

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

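/*
 * The CPU architecture is decoded from the main ID register: pre-ARM7
 * parts lack an architecture field, ARM7 parts flag Thumb-awareness
 * (v4T) in bit 23, later parts encode the architecture in bits [19:16],
 * and the value 0xf selects the revised CPUID scheme, for which MMFR0
 * distinguishes VMSA/PMSA v6 from v7.
 */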
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
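		/*
		 * line_size * num_sets is the size of one cache way; if that
		 * exceeds PAGE_SIZE, virtual index bits reach above the page
		 * offset and the I-cache can alias.
		 */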
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
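	/*
	 * Bit 0 (the Thumb bit) is cleared above to get the actual code
	 * address; the empty asm below presumably hides fn_addr's
	 * provenance from the optimizer so the instruction stores that
	 * follow are not elided or reordered.
	 */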
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
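	/*
	 * The boot CPU becomes logical CPU 0; the logical slot matching its
	 * physical id takes physical 0, and all other CPUs map identically.
	 */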
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
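	/*
	 * Illustrative values: start = 0x80000400 with size = 0x2000 gives
	 * aligned_start = 0x80001000 and size = 0x1400, which the
	 * PAGE_SIZE mask further below trims to a single page.
	 */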
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size  = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]".
 */

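/*
 * An illustrative boot argument: "mem=64M@0xc0000000" replaces the
 * firmware-reported memory with a single 64MB bank at 0xc0000000.
 */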
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
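			/*
			 * Resource structs are carved out of memblock with an
			 * explicit SMP_CACHE_BYTES alignment rather than the
			 * old implicit default, and are never freed.
			 */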
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump-capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

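	/*
	 * crash_base == 0 means no base was requested on the command line:
	 * find a suitable range automatically, capped to lowmem and to what
	 * the boot idmap alias can address.  An explicit base is honoured
	 * only if that exact range is still free.
	 */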
	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16,
				    phys_to_virt(__atags_pointer));
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};