ARM: kernel: add MIDR to per-CPU information data
[linux-2.6-block.git] / arch/arm/kernel/setup.c
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"
#include "tcm.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

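/*
 * endian_test below overlays a four-byte character array on an unsigned
 * long; casting the long to char in ENDIANNESS picks up its least
 * significant byte, which is 'l' on a little-endian CPU and 'b' on a
 * big-endian one.  setup_processor() appends this character to the
 * utsname machine and ELF platform strings (e.g. "armv7l").
 */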
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

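/*
 * Decide whether the instruction cache can alias: on ARMv7 this is the
 * case when one cache way (line size * number of sets) is larger than a
 * page; on ARMv6 the cache type register flags it directly (bit 11).
 * PIPT instruction caches never alias.
 */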
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}

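/*
 * Classify the data and instruction caches from the cache type register
 * so that the cache_is_*()/icache_is_*() helpers used by the mm code
 * report the correct VIVT/VIPT/PIPT policy.
 */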
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

int __cpu_logical_map[NR_CPUS];

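/*
 * Map the boot CPU to logical CPU 0: the MPIDR affinity of the CPU we
 * are booting on becomes cpu_logical_map(0), and the entry it displaces
 * is swapped into its slot so every physical CPU still appears exactly
 * once in the map.
 */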
void __init smp_setup_processor_id(void)
{
        int i;
        u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;

        cpu_logical_map(0) = cpu;
        for (i = 1; i < NR_CPUS; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
}

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);

#ifndef CONFIG_LPAE
        if (bank->start + size < bank->start) {
                printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                        "32-bit physical address space\n", (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - bank->start;
        }
#endif

        bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Check whether this memory region has non-zero size.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        phys_addr_t size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start = virt_to_phys(_text);
        kernel_code.end   = virt_to_phys(_etext - 1);
        kernel_data.start = virt_to_phys(_sdata);
        kernel_data.end   = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};
#endif

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserve a memory area for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter.  The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

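/*
 * Comparison helper for the sort() call in setup_arch(): orders the
 * meminfo banks by ascending start pfn before they are handed to
 * sanity_check_meminfo() and arm_memblock_init().
 */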
static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

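/*
 * Architecture-level boot setup, called from start_kernel(): probe the
 * CPU, pick the machine descriptor from the flattened device tree or the
 * ATAG list, size memory, set up paging and the standard resources, then
 * run the platform's early init hook.
 */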
void __init setup_arch(char **cmdline_p)
{
        struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        setup_dma_zone(mdesc);

        if (mdesc->restart_mode)
                reboot_setup(&mdesc->restart_mode);

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

#ifdef CONFIG_SMP
        if (is_smp()) {
                smp_set_ops(mdesc->smp);
                smp_init_cpus();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

        tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}

static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

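/*
 * Human-readable names for the hardware capability bits printed in the
 * "Features" line of /proc/cpuinfo; the order of the strings must match
 * the HWCAP_* bit numbering used by elf_hwcap (c_show() tests bit i for
 * entry i).
 */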
static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};