/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <asm/kdump.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/firmware.h>
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
#include <asm/kvm_ppc.h>
#include <asm/hugetlb.h>
#include <asm/epapr_hcalls.h>
#define DBG(fmt...) udbg_printf(fmt)

int spinning_secondaries;
/* Pick defaults since we might want to patch instructions
 * before we've read this from the device tree.
 */
struct ppc64_caches ppc64_caches = {
EXPORT_SYMBOL_GPL(ppc64_caches);
/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
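/*
 * On Book3E, TLB bookkeeping is per core rather than per thread: point
 * each thread's PACA at the tlb_core_data of the first thread on its core.
 */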
static void setup_tlb_core_data(void)
	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);
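		/* All threads of a core share the first thread's TLB core data */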
		paca[cpu].tcd_ptr = &paca[first].tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 */
		if (smt_enabled_at_boot >= 2 &&
		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
		    book3e_htw_mode != PPC_HTW_E6500) {
			/* Should we panic instead? */
			WARN_ONCE(1, "%s: unsupported MMU configuration -- expect problems\n",
				  __func__);
static void setup_tlb_core_data(void)

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;
	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
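		/* Otherwise treat the value as a thread count, capped at threads_per_core */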
			rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
				smt_enabled_at_boot =
					min(threads_per_core, (int)smt);
	dn = of_find_node_by_path("/options");
		smt_option = of_get_property(dn, "ibm,smt-enabled",

		if (!strcmp(smt_option, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_option, "off"))
			smt_enabled_at_boot = 0;
/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
	smt_enabled_cmdline = p;

early_param("smt-enabled", early_smt_enabled);

#define check_smt_enabled()
#endif /* CONFIG_SMP */
/** Fix up paca fields required for the boot cpu */
static void fixup_boot_paca(void)
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);

	/* Initialize lockdep early or else spinlocks will blow */

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));
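	/*
	 * Set up ePAPR paravirtualised hypercall support if the device
	 * tree advertises a compatible hypervisor node.
	 */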
	epapr_paravirt_early_init();

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(&paca[boot_cpuid]);

	/* Probe the machine type */
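	/*
	 * Install the kdump trampolines: when running as a crash-dump
	 * kernel at a non-zero physical offset, exceptions taken at the
	 * fixed low-memory vectors must be redirected into this kernel's
	 * copy of the handlers.
	 */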
	setup_kdump_trampoline();

	DBG("Found, Initializing memory management...\n");

	/* Initialize the hash table or TLB handling */

	/*
	 * Reserve any gigantic pages requested on the command line.
	 * memblock needs to have been initialized by the time this is
	 * called since this will reserve memory.
	 */
	reserve_hugetlb_gpages();

	DBG(" <- early_setup()\n");
#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even).
	 *
	 * Right after we return from this function, we turn on the MMU,
	 * which means the real-mode access trick that btext does will
	 * no longer work; it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
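/*
 * Secondary-CPU counterpart of early_setup(): called from the assembly
 * startup path on each secondary, before the MMU is enabled on that CPU.
 */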
void early_setup_secondary(void)
	/* Mark interrupts soft-disabled in the PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
void smp_release_cpus(void)
	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */
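	/* Write the secondary entry point into the spinloop release word */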
	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
	*ptr = __pa(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		if (spinning_secondaries == 0)

	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is mostly
 * cache information about the CPU that will be used by cache flush
 * routines and/or provided to userland.
 */
static void __init initialize_cache_info(void)
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");
	for_each_node_by_type(np, "cpu") {
		/*
		 * We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		const __be32 *sizep, *lsizep;

		lsize = cur_cpu_spec->dcache_bsize;
		sizep = of_get_property(np, "d-cache-size", NULL);
			size = be32_to_cpu(*sizep);
		lsizep = of_get_property(np, "d-cache-block-size",
		/* fallback if block size missing */
			lsizep = of_get_property(np,
			lsize = be32_to_cpu(*lsizep);
		if (sizep == NULL || lsizep == NULL)
			DBG("Argh, can't find dcache properties ! "
			    "sizep: %p, lsizep: %p\n", sizep, lsizep);

		ppc64_caches.dsize = size;
		ppc64_caches.dline_size = lsize;
		ppc64_caches.log_dline_size = __ilog2(lsize);
		ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

		lsize = cur_cpu_spec->icache_bsize;
		sizep = of_get_property(np, "i-cache-size", NULL);
			size = be32_to_cpu(*sizep);
		lsizep = of_get_property(np, "i-cache-block-size",
			lsizep = of_get_property(np,
			lsize = be32_to_cpu(*lsizep);
		if (sizep == NULL || lsizep == NULL)
			DBG("Argh, can't find icache properties ! "
			    "sizep: %p, lsizep: %p\n", sizep, lsizep);

		ppc64_caches.isize = size;
		ppc64_caches.iline_size = lsize;
		ppc64_caches.log_iline_size = __ilog2(lsize);
		ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;

	DBG(" <- initialize_cache_info()\n");
/*
 * Do some initial setup of the system. The parameters are those which
 * were passed in from the bootloader.
 */
void __init setup_system(void)
	DBG(" -> setup_system()\n");

	/* Apply the CPU-specific and firmware-specific fixups to kernel
	 * text (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(cur_cpu_spec->cpu_features,
			  &__start___ftr_fixup, &__stop___ftr_fixup);
	do_feature_fixups(cur_cpu_spec->mmu_features,
			  &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
	do_lwsync_fixups(cur_cpu_spec->cpu_features,
			 &__start___lwsync_fixup, &__stop___lwsync_fixup);
	/*
	 * Unflatten the device-tree passed by prom_init or kexec.
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with information
	 * retrieved from the device-tree.
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available.
	 */
#endif /* CONFIG_PPC_RTAS */
	/*
	 * Check if we have an initrd provided via the device-tree.
	 */

	/*
	 * Do some platform-specific early initialization; that includes
	 * setting up the hash table pointers. It also sets up some
	 * interrupt-mapping related options that will be used by
	 * finish_device_tree().
	 */
	if (ppc_md.init_early)

	/*
	 * We can discover serial ports now since the above did set up the
	 * hash table management for us, thus ioremap works. We do that early
	 * so that further code can be debugged.
	 */
	find_legacy_serial_ports();

	/*
	 * Register early console.
	 */
	register_early_udbg_console();
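	/*
	 * Build the possible/present CPU maps from the device tree so that
	 * physical CPU ids can be mapped to logical ones below.
	 */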
	smp_setup_cpu_maps();
	setup_tlb_core_data();

	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids.
	 */
488 printk("Starting Linux PPC64 %s\n", init_utsname()->version);
490 printk("-----------------------------------------------------\n");
491 printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
492 printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size());
493 if (ppc64_caches.dline_size != 0x80)
494 printk("ppc64_caches.dcache_line_size = 0x%x\n",
495 ppc64_caches.dline_size);
496 if (ppc64_caches.iline_size != 0x80)
497 printk("ppc64_caches.icache_line_size = 0x%x\n",
498 ppc64_caches.iline_size);
499 #ifdef CONFIG_PPC_STD_MMU_64
501 printk("htab_address = 0x%p\n", htab_address);
502 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
503 #endif /* CONFIG_PPC_STD_MMU_64 */
504 if (PHYSICAL_START > 0)
505 printk("physical_start = 0x%llx\n",
506 (unsigned long long)PHYSICAL_START);
507 printk("-----------------------------------------------------\n");
509 DBG(" <- setup_system()\n");
/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static u64 safe_stack_limit(void)
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */

	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
static void __init irqstack_early_init(void)
	u64 limit = safe_stack_limit();

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));

#ifdef CONFIG_PPC_BOOK3E
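/*
 * Book3E takes critical, debug and machine-check exceptions on separate
 * exception levels; give each possible CPU its own stack for each level.
 */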
static void __init exc_lvl_early_init(void)
	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);

#define exc_lvl_early_init()
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
static void __init emergency_stack_init(void)
	/*
	 * Emergency stacks must be under 256MB; we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);
	for_each_possible_cpu(i) {
		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		paca[i].emergency_sp = __va(sp);

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for machine check exception handling. */
		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		paca[i].mc_emergency_sp = __va(sp);
/*
 * Called from start_kernel; this initializes bootmem, which is used
 * to manage page allocation until mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
	ppc64_boot_msg(0x12, "Setup Arch");

	*cmdline_p = cmd_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;
	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;

#ifdef CONFIG_PPC_64K_PAGES
	init_mm.context.pte_frag = NULL;

	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();
#ifdef CONFIG_PPC_STD_MMU_64

	/* set up the bootmem stuff with available memory */

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;

	if (ppc_md.setup_arch)

	/* Initialize the MMU context management stuff */

	/* Interrupt code needs to be 64K-aligned */
	if ((unsigned long)_stext & 0xffff)
		panic("Kernelbase not 64K-aligned (0x%lx)!\n",
		      (unsigned long)_stext);

	ppc64_boot_msg(0x15, "Setup Done");
/* ToDo: do something useful if ppc_md is not yet set up. */
#define PPC64_LINUX_FUNCTION 0x0f000000
#define PPC64_IPL_MESSAGE    0xc0000000
#define PPC64_TERM_MESSAGE   0xb0000000
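/*
 * Emit a coded progress message via ppc_md.progress(), which some
 * platforms route to a front panel or debug console.
 */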
static void ppc64_do_msg(unsigned int src, const char *msg)
	if (ppc_md.progress) {
		sprintf(buf, "%08X\n", src);
		ppc_md.progress(buf, 0);
		snprintf(buf, 128, "%s", msg);
		ppc_md.progress(buf, 0);
/* Print a boot progress message. */
void ppc64_boot_msg(unsigned int src, const char *msg)
	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
	printk("[boot]%04x %s\n", src, msg);

#define PCPU_DYN_SIZE		()
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));

static void __init pcpu_fc_free(void *ptr, size_t size)
	free_bootmem(__pa(ptr), size);
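/*
 * Distance callback for the percpu allocator: CPUs on the same NUMA
 * node are treated as local so their units can be grouped together.
 */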
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	return REMOTE_DISTANCE;

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
		panic("cannot initialize percpu area (err=%d)", rc);
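	/*
	 * Record each CPU's per-cpu offset in its PACA as well, since
	 * powerpc resolves per-cpu accesses through the PACA.
	 */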
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);