powerpc/prom: early_init_dt_scan_cpus() updates cpu features only once
[linux-2.6-block.git] / arch/powerpc/kernel/setup_64.c
/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define DEBUG

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
#include <asm/kvm_ppc.h>
#include <asm/hugetlb.h>
#include <asm/epapr_hcalls.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int spinning_secondaries;
u64 ppc64_pft_size;

/* Pick defaults since we might want to patch instructions
 * before we've read this from the device tree.
 */
struct ppc64_caches ppc64_caches = {
	.dline_size = 0x40,
	.log_dline_size = 6,
	.iline_size = 0x40,
	.log_iline_size = 6
};
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
static void setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		paca[cpu].tcd_ptr = &paca[first].tcd;

		/*
		 * If we have threads, we need either the tlbsrx.
		 * instruction or e6500 tablewalk mode, or else TLB
		 * handlers will be racy and could produce duplicate
		 * entries.
		 */
		if (smt_enabled_at_boot >= 2 &&
		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
		    book3e_htw_mode != PPC_HTW_E6500) {
			/* Should we panic instead? */
			WARN_ONCE(1, "%s: unsupported MMU configuration -- expect problems\n",
				  __func__);
		}
	}
}
#else
static void setup_tlb_core_data(void)
{
}
#endif

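/*
 * Every hardware thread of a core thus shares the first thread's
 * tlb_core_data, including the lock that the BUILD_BUG_ON above
 * keeps at offset zero.
 */
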
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			long smt;
			int rc;

			rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, (int)smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

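/*
 * For example, booting with "smt-enabled=2" caps each core at two
 * threads via the min() above, while "smt-enabled=on" and
 * "smt-enabled=off" enable every thread or disable SMT entirely.
 */
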
/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#else
#define check_smt_enabled()
#endif /* CONFIG_SMP */

/** Fix up paca fields required for the boot cpu */
static void fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally, provided we
 * only toy with things in the RMO region (e.g. a global at
 * 0xc000000000001000 is reachable at real address 0x1000).
 * From here, we do some early parsing of the device-tree to set
 * up our MEMBLOCK data structures, and allocate & initialize the
 * hash table and segment tables so we can start running with
 * translation enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* Initialize lockdep early or else spinlocks will blow */
	lockdep_init();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	epapr_paravirt_early_init();

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(&paca[boot_cpuid]);
	fixup_boot_paca();

	/* Probe the machine type */
	probe_machine();

	setup_kdump_trampoline();

	DBG("Found, Initializing memory management...\n");

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	kvm_cma_reserve();

	/*
	 * Reserve any gigantic pages requested on the command line.
	 * memblock needs to have been initialized by the time this is
	 * called since this will reserve memory.
	 */
	reserve_hugetlb_gpages();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even).
	 *
	 * Right after we return from this function, we turn on the MMU,
	 * which means the real-mode access trick that btext does will
	 * no longer work; it needs to switch to using a real MMU
	 * mapping. This call ensures that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
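
/*
 * Once early_setup() returns, head_64.S turns the MMU on and boot
 * continues in virtual mode, eventually reaching setup_system() below
 * and then the generic start_kernel().
 */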

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts soft-disabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non-SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = __pa(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures
 * (at least until we get rid of them completely). This is mostly some
 * cache information about the CPU that will be used by cache flush
 * routines and/or provided to userland.
 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for_each_node_by_type(np, "cpu") {
		num_cpus += 1;

		/*
		 * We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if (num_cpus == 1) {
			const __be32 *sizep, *lsizep;
			u32 size, lsize;

			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = of_get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "d-cache-block-size",
						 NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "d-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.dsize = size;
			ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = of_get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "i-cache-block-size",
						 NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "i-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.isize = size;
			ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	DBG(" <- initialize_cache_info()\n");
}
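
/*
 * Sketch of the device-tree properties consulted above (the values
 * are illustrative only):
 *
 *	cpu@0 {
 *		d-cache-size       = <0x8000>;	// 32KB
 *		d-cache-block-size = <0x80>;	// 128-byte lines
 *		i-cache-size       = <0x8000>;
 *		i-cache-block-size = <0x80>;
 *	};
 */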

/*
 * Do some initial setup of the system. The parameters are those which
 * were passed in from the bootloader.
 */
void __init setup_system(void)
{
	DBG(" -> setup_system()\n");

	/* Apply the CPU-specific and firmware-specific fixups to kernel
	 * text (nop out sections not relevant to this CPU or this firmware)
	 */
	do_feature_fixups(cur_cpu_spec->cpu_features,
			  &__start___ftr_fixup, &__stop___ftr_fixup);
	do_feature_fixups(cur_cpu_spec->mmu_features,
			  &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
	do_lwsync_fixups(cur_cpu_spec->cpu_features,
			 &__start___lwsync_fixup, &__stop___lwsync_fixup);
	do_final_fixups();

	/*
	 * Unflatten the device-tree passed by prom_init or kexec
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with information
	 * retrieved from the device-tree.
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available
	 */
	rtas_initialize();
#endif /* CONFIG_PPC_RTAS */

	/*
	 * Check if we have an initrd provided via the device-tree
	 */
	check_for_initrd();

	/*
	 * Do some platform specific early initializations; that includes
	 * setting up the hash table pointers. It also sets up some
	 * interrupt-mapping related options that will be used by
	 * finish_device_tree()
	 */
	if (ppc_md.init_early)
		ppc_md.init_early();

	/*
	 * We can discover serial ports now since the above did setup the
	 * hash table management for us, thus ioremap works. We do that early
	 * so that further code can be debugged
	 */
	find_legacy_serial_ports();

	/*
	 * Register early console
	 */
	register_early_udbg_console();

	/*
	 * Initialize xmon
	 */
	xmon_setup();

	smp_setup_cpu_maps();
	check_smt_enabled();
	setup_tlb_core_data();

#ifdef CONFIG_SMP
	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids
	 */
	smp_release_cpus();
#endif

	printk("Starting Linux PPC64 %s\n", init_utsname()->version);

	printk("-----------------------------------------------------\n");
	printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
	printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
	if (ppc64_caches.dline_size != 0x80)
		printk("ppc64_caches.dcache_line_size = 0x%x\n",
		       ppc64_caches.dline_size);
	if (ppc64_caches.iline_size != 0x80)
		printk("ppc64_caches.icache_line_size = 0x%x\n",
		       ppc64_caches.iline_size);
#ifdef CONFIG_PPC_STD_MMU_64
	if (htab_address)
		printk("htab_address = 0x%p\n", htab_address);
	printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#endif /* CONFIG_PPC_STD_MMU_64 */
	if (PHYSICAL_START > 0)
		printk("physical_start = 0x%llx\n",
		       (unsigned long long)PHYSICAL_START);
	printk("-----------------------------------------------------\n");

	DBG(" <- setup_system()\n");
}

/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
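
/*
 * For reference: SID_SHIFT is 28 and SID_SHIFT_1T is 40, so on Book3S
 * the limit above is the size of one bolted segment: 256MB or 1TB.
 */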

static void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
	}
}

#ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;

	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#else
#define exc_lvl_early_init()
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
static void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		unsigned long sp;
		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		sp += THREAD_SIZE;
		paca[i].emergency_sp = __va(sp);

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for machine check exception handling. */
		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		sp += THREAD_SIZE;
		paca[i].mc_emergency_sp = __va(sp);
#endif
	}
}
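
/*
 * Note the sp += THREAD_SIZE above: stacks grow downwards, so each
 * emergency stack pointer is set to the top of its THREAD_SIZE block.
 */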

/*
 * Called from start_kernel, this initializes bootmem, which is used
 * to manage page allocation until mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	ppc64_boot_msg(0x12, "Setup Arch");

	*cmdline_p = cmd_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	if (ppc_md.panic)
		setup_panic();

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;
#ifdef CONFIG_PPC_64K_PAGES
	init_mm.context.pte_frag = NULL;
#endif
	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();

#ifdef CONFIG_PPC_STD_MMU_64
	stabs_alloc();
#endif
	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	sparse_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

	paging_init();

	/* Initialize the MMU context management stuff */
	mmu_context_init();

	/* Interrupt code needs to be 64K-aligned */
	if ((unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);

	ppc64_boot_msg(0x15, "Setup Done");
}


/* ToDo: do something useful if ppc_md is not yet setup. */
#define PPC64_LINUX_FUNCTION 0x0f000000
#define PPC64_IPL_MESSAGE 0xc0000000
#define PPC64_TERM_MESSAGE 0xb0000000

static void ppc64_do_msg(unsigned int src, const char *msg)
{
	if (ppc_md.progress) {
		char buf[128];

		sprintf(buf, "%08X\n", src);
		ppc_md.progress(buf, 0);
		snprintf(buf, 128, "%s", msg);
		ppc_md.progress(buf, 0);
	}
}

/* Print a boot progress message. */
void ppc64_boot_msg(unsigned int src, const char *msg)
{
	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
	printk("[boot]%04x %s\n", src, msg);
}

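/*
 * Example from this file: setup_arch() reports 0x12 ("Setup Arch") on
 * entry and 0x15 ("Setup Done") on exit; the code and message also go
 * to ppc_md.progress() when the platform provides one.
 */
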
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
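
/*
 * After this runs, a per-cpu variable for CPU n resolves to its static
 * address plus __per_cpu_offset[n]; the copy in paca[n].data_offset
 * lets low-level code reach the same area directly from the PACA.
 */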
#endif


#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif