// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <linux/pgtable.h>
#include <linux/of_fdt.h>

#include <asm/asm-prototypes.h>
#include <asm/kvm_guest.h>
#include <asm/kdump.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/firmware.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/early_ioremap.h>
#include <asm/pgalloc.h>
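/* Secondary CPUs still held in the common spinloop; see smp_release_cpus(). */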
int spinning_secondaries;
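
/*
 * Conservative boot-time cache geometry defaults; initialize_cache_info()
 * below replaces them with values parsed from the device tree.
 */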
struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);
#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);
			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */
/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(struct paca_struct *boot_paca)
{
	/* The boot cpu is started */
	boot_paca->cpu_start = 1;
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Give the early boot machine check stack somewhere to use, use
	 * half of the init stack. This is a bit hacky but there should not be
	 * deep stack usage in early init so shouldn't overflow it or overwrite
	 * things.
	 */
	boot_paca->mc_emergency_sp = (void *)&init_thread_union +
		(THREAD_SIZE / 2);
#endif
	/* Allow percpu accesses to work until we setup percpu data */
	boot_paca->data_offset = 0;
	/* Mark interrupts soft and hard disabled in PACA */
	boot_paca->irq_soft_mask = IRQS_DISABLED;
	boot_paca->irq_happened = PACA_IRQ_HARD_DIS;

	WARN_ON(mfmsr() & MSR_EE);
}
static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/*
		 * - PR KVM does not support AIL mode interrupts in the host
		 *   while a PR guest is running.
		 *
		 * - SCV system call interrupt vectors are only implemented for
		 *   AIL mode interrupts.
		 *
		 * - On pseries, AIL mode can only be enabled and disabled
		 *   system-wide so when a PR VM is created on a pseries host,
		 *   all CPUs of the host are set to AIL=0 mode.
		 *
		 * - Therefore host CPUs must not execute scv while a PR VM
		 *   exists.
		 *
		 * - SCV support can not be disabled dynamically because the
		 *   feature is advertised to host userspace. Disabling the
		 *   facility and emulating it would be possible but is not
		 *   considered worth it.
		 *
		 * - So SCV support is blanket disabled if PR KVM could possibly
		 *   run. That is, PR support compiled in, booting on pseries
		 *   with hash MMU.
		 */
		if (IS_ENABLED(CONFIG_KVM_BOOK3S_PR_POSSIBLE) && !radix_enabled()) {
			init_task.thread.fscr &= ~FSCR_SCV;
			cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
		}

		/* Enable AIL if possible */
		if (!pseries_enable_reloc_on_exc()) {
			init_task.thread.fscr &= ~FSCR_SCV;
			cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
		}

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}
static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		unsigned long new_lpcr = lpcr;

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			/* P10 DD1 does not have HAIL */
			if (pvr_version_is(PVR_POWER10) &&
			    (mfspr(SPRN_PVR) & 0xf00) == 0x100)
				new_lpcr |= LPCR_AIL_3;
			else
				new_lpcr |= LPCR_HAIL;
		} else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
			new_lpcr |= LPCR_AIL_3;
		}

		if (new_lpcr != lpcr)
			mtspr(SPRN_LPCR, new_lpcr);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
unsigned long spr_default_dscr = 0;

static void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}
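
/*
 * spr_default_dscr is later consumed as the initial system-wide DSCR
 * default (see dscr_default in sysfs.c).
 */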
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/*
	 * Assume we're on cpu 0 for now.
	 *
	 * We need to load a PACA very early for a few reasons.
	 *
	 * The stack protector canary is stored in the paca, so as soon as we
	 * call any stack protected code we need r13 pointing somewhere valid.
	 *
	 * If we are using kcov it will call in_task() in its instrumentation,
	 * which relies on the current task from the PACA.
	 *
	 * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
	 * printk(), which can trigger both stack protector and kcov.
	 *
	 * percpu variables and spin locks also use the paca.
	 *
	 * So set up a temporary paca. It will be replaced below once we know
	 * what CPU we are on.
	 */
	initialise_paca(&boot_paca, 0);
	fixup_boot_paca(&boot_paca);
	WARN_ON(local_paca != 0);
	setup_paca(&boot_paca); /* install the paca into registers */

	/* -------- printk is now safe to use ------- */

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && (mfmsr() & MSR_HV))
		enable_machine_check();

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	fixup_boot_paca(paca_ptrs[boot_cpuid]);
	setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/*
	 * Configure Kernel Userspace Protection. This needs to happen before
	 * feature fixups for platforms that implement this using features.
	 */
	setup_kup();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	early_ioremap_setup();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	udbg_printf(" <- %s()\n", __func__);

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above udbg_printf() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/* Perform any KUP setup that is per-cpu */
	setup_kup();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}
#endif /* CONFIG_SMP */
void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}
void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);
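
	/*
	 * generic_secondary_smp_init() (head_64.S) decrements
	 * spinning_secondaries as each secondary arrives, then parks it on
	 * its own paca->cpu_start flag.
	 */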
	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures
 * (at least until we get rid of them completely). This is mostly some
 * cache information about the CPU that will be used by cache flush
 * routines and/or provided to userland
 */
static void __init init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
				   u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}
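
/*
 * Worked example with the POWER8 values hard coded below: the 64kB L1D
 * (size 0x10000, 128-byte lines, 64 sets) gives
 * assoc = 0x10000 / (64 * 128) = 8, i.e. an 8-way set-associative cache.
 */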
static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep == NULL)
		lsizep = bsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}
void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize  blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128, 128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128, 0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128, 0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			pr_warn("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			pr_warn("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;
}
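
/*
 * The dcache_bsize and icache_bsize values set above are exported to
 * userspace via the AT_DCACHEBSIZE and AT_ICACHEBSIZE auxv entries.
 */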
/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers that must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E_64
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
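
/*
 * For ppc64_bolted_size() above: SID_SHIFT is 28 (256MB segments) and
 * SID_SHIFT_1T is 40 (1TB segments).
 */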
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	void *ptr;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
				     MEMBLOCK_LOW_LIMIT, limit,
				     early_cpu_to_node(cpu));
	if (!ptr)
		panic("cannot allocate stacks");

	return ptr;
}
void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}
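
/*
 * Book3E has separate critical, debug and machine check exception levels,
 * each of which needs its own early per-CPU stack.
 */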
#ifdef CONFIG_PPC_BOOK3E_64
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit, mce_limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Machine check on pseries calls rtas, but can't use the static
	 * rtas_args due to a machine check hitting while the lock is held.
	 * rtas args have to be under 4GB, so the machine check stack is
	 * limited to 4GB so args can be put on stack.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
		mce_limit = SZ_4G;

	for_each_possible_cpu(i) {
		paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
#endif
	}
}
#ifdef CONFIG_SMP
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

static __init int pcpu_cpu_to_node(int cpu)
{
	return early_cpu_to_node(cpu);
}
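
/*
 * The distance and node callbacks above let the percpu allocator group
 * CPUs on the same NUMA node into the same first-chunk unit group.
 */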
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	/*
	 * BookE and BookS radix are historical values and should be revisited.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
		atom_size = SZ_1M;
	} else if (radix_enabled()) {
		atom_size = PAGE_SIZE;
	} else if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) {
		/*
		 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
		 * to group units. For larger mappings, use 1M atom which
		 * should be large enough to contain a number of units.
		 */
		if (mmu_linear_psize == MMU_PAGE_4K)
			atom_size = PAGE_SIZE;
		else
			atom_size = SZ_1M;
	}

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
					    pcpu_cpu_to_node);
		if (rc)
			pr_warn("PERCPU: %s allocator failed (%d), "
				"falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}

	if (rc < 0)
		rc = pcpu_page_first_chunk(0, pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_MEMORY_HOTPLUG
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif
#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
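
/*
 * ppc_proc_freq is in Hz, so the perf NMI sample period computed below is
 * the watchdog threshold in seconds expressed in processor cycles.
 */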
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif
/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in KVM guests, so disable it there
 * by default too. PowerVM will not stop or arbitrarily oversubscribe
 * CPUs, but gives a minimum regular allotment even with SPLPAR, so enable
 * the detector for non-KVM guests, assuming PowerVM.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (is_kvm_guest())
			hardlockup_detector_disable();
	}
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);