/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
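/*
 * Annotation (not part of the original source): the union overlays four
 * chars on one unsigned long.  Casting the long to char keeps only its
 * least significant byte, which is the lowest-addressed element c[0]
 * ('l') on a little-endian CPU and the highest-addressed element c[3]
 * ('b') on a 32-bit big-endian CPU.  The resulting character is appended
 * to the machine and ELF platform strings below, giving e.g. "armv7l".
 */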

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

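/*
 * Worked example (annotation, not part of the original source): for a
 * 32 KiB 4-way set-associative VIPT I-cache with 32-byte lines, CCSIDR
 * reports LineSize = 1 (4 << (1 + 2) = 32 bytes) and NumSets = 255
 * (256 sets).  One cache way then spans 32 * 256 = 8 KiB, which exceeds
 * a 4 KiB page, so virtual index bits above the page offset can alias
 * and the I-cache is reported as aliasing.
 */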
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs, vmsa;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}

	/* LPAE implies atomic ldrd/strd instructions */
	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
	if (vmsa >= 5)
		elf_hwcap |= HWCAP_LPAE;
}

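/*
 * Annotation (not part of the original source): ID_ISAR0[27:24] encodes
 * hardware-divide support -- 1 means SDIV/UDIV in the Thumb instruction
 * set only, 2 means both ARM and Thumb encodings.  The switch above
 * therefore falls through deliberately: case 2 sets HWCAP_IDIVA and
 * then also sets HWCAP_IDIVT via case 1.
 */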
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

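/*
 * Annotation (not part of the original source): the inline asm above
 * briefly switches the CPU into IRQ, ABT and UND mode in turn (with
 * IRQs and FIQs masked), points each mode's banked sp at the matching
 * three-word area inside struct stack, and finally returns to SVC mode.
 * r14 is used as scratch in each mode, hence the clobber.
 */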
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

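/*
 * Worked example (annotation, not part of the original source): a
 * two-cluster system with MPIDRs {0x000, 0x001, 0x100, 0x101} yields
 * mask = 0x101, so bits[0] = bits[1] = 1 and bits[2] = 0.  With
 * MPIDR_LEVEL_BITS = 8, shift_aff[0] = 0 and shift_aff[1] = 8 + 0 - 1
 * = 7, so the aff1 bit (bit 8) is shifted down next to the aff0 bit,
 * hashing the four CPUs collision-free onto indices 0-3.
 */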
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	erratum_a15_798181_init();

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
	u64 aligned_start;

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
		       "32-bit physical address space\n", (long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	bank->start = aligned_start;
	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

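/*
 * Worked example (annotation, not part of the original source): for
 * start = 0x80000400 and size = 0x100000, the unaligned leading 0x400
 * bytes are dropped (size becomes 0xffc00), aligned_start becomes
 * 0x80001000, and the final masking trims the bank to 0xff000 bytes,
 * so the registered bank covers only whole pages.
 */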
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

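/*
 * Example (annotation, not part of the original source): booting with
 * "mem=64M@0x80000000" makes memparse() return size = 0x4000000 and,
 * after the '@', start = 0x80000000; a plain "mem=64M" keeps start at
 * PHYS_OFFSET.  The first mem= option also clears any banks that the
 * boot tags or device tree had already registered.
 */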
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

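/*
 * Example (annotation, not part of the original source): with
 * "crashkernel=64M@0x30000000" on the command line, parse_crashkernel()
 * returns crash_size = 0x4000000 and crash_base = 0x30000000; the range
 * is then reserved from bootmem and published through crashk_res so it
 * shows up in /proc/iomem for the kexec tooling.
 */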
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

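/*
 * Annotation (not part of the original source): note the ordering
 * constraints in setup_arch() above -- the machine_desc is selected
 * (FDT first, then ATAGS) before parse_early_param() runs, memory banks
 * are sorted and sanity-checked before paging_init() builds the page
 * tables, and the SMP operations (psci_smp_ops vs mdesc->smp) are
 * chosen before smp_init_cpus() enumerates the secondary cores.
 */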
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
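
/*
 * Annotation (not part of the original source): cpuinfo_op implements
 * the seq_file iterator for /proc/cpuinfo as a single-record sequence.
 * c_start() returns a non-NULL token only for position 0 and c_next()
 * always returns NULL, so c_show() is called exactly once and itself
 * loops over the online CPUs.
 */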