ARM: Provide common header for hard_smp_processor_id()
[linux-2.6-block.git] arch/arm/kernel/setup.c
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif

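/*
 * Small per-CPU stacks for the IRQ, abort and undefined-instruction
 * exception modes.  Three words each are enough: the vector entry code
 * in entry-armv.S only uses them to stash r0, lr and spsr before
 * switching to SVC mode.
 */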
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
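/*
 * endian_test stores the bytes 'l', '?', '?', 'b'; reading the word back
 * as a char yields its least-significant byte, i.e. 'l' on a little-endian
 * kernel and 'b' on a big-endian one.  ENDIANNESS is appended to the
 * utsname machine and ELF platform strings in setup_processor(), giving
 * e.g. the familiar "armv5tejl" reported by uname -m.
 */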
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram	mem_res[0]
#define kernel_code	mem_res[1]
#define kernel_data	mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0	io_res[0]
#define lp1	io_res[1]
#define lp2	io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

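/*
 * Map the main ID register onto one of the CPU_ARCH_* constants.  On an
 * ARMv7 part such as a Cortex-A8, for instance, the architecture field
 * reads 0xf, so MMFR0 is consulted and CPU_ARCH_ARMv7 is returned.
 */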
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else if (cachetype & (1 << 23))
			cacheid = CACHEID_VIPT_ALIASING;
		else
			cacheid = CACHEID_VIPT_NONALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
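	/*
	 * For example (4K pages), start = 0x00001234 and size = 0x10000
	 * become bank->start = 0x2000 and bank->size = 0xf000, keeping the
	 * bank entirely within the region the caller described.
	 */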
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
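/*
 * For instance, booting with "mem=64M@0x20000000" (an address chosen purely
 * for illustration) registers a single 64MB bank there; repeating mem= adds
 * further banks, and the first use discards any banks set up earlier, e.g.
 * from the boot tags.
 */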

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_data);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
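/*
 * A typical boot loader places the list near the start of RAM (by
 * convention at offset 0x100): ATAG_CORE first, one ATAG_MEM per memory
 * bank, optionally ATAG_CMDLINE and friends, and finally the ATAG_NONE
 * terminator.
 */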
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

#ifndef CONFIG_CMDLINE_FORCE
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
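/*
 * With no usable tag list from the boot loader, the defaults above describe
 * a single MEM_SIZE (16MB by default) bank starting at PHYS_OFFSET.
 */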

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */

static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}

void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	arch_nr_irqs = mdesc->nr_irqs;
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};

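/*
 * c_show() generates /proc/cpuinfo.  Purely illustrative output for a
 * hypothetical single-core Cortex-A8 board (every value depends on the
 * actual CPU and machine):
 *
 *	Processor	: ARMv7 Processor rev 2 (v7l)
 *	BogoMIPS	: 499.92
 *	Features	: swp half thumb fastmult vfp edsp thumbee neon vfpv3
 *	CPU implementer	: 0x41
 *	CPU architecture: 7
 *	CPU variant	: 0x3
 *	CPU part	: 0xc08
 *	CPU revision	: 2
 *
 *	Hardware	: <machine name>
 *	Revision	: 0000
 *	Serial		: 0000000000000000
 */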
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};