powerpc/64: Fix task_cpu in early boot when booting non-zero cpuid
[linux-2.6-block.git] / arch/powerpc/kernel/setup_64.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>
#include <linux/pgtable.h>
#include <linux/of.h>
#include <linux/of_fdt.h>

#include <asm/asm-prototypes.h>
#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
#include <asm/early_ioremap.h>
#include <asm/pgalloc.h>

#include "setup.h"

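/* Secondaries still in the common holding loop; see smp_release_cpus(). */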
int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);

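/*
 * Point each CPU's paca at the TLB core data of the core's first
 * thread, so all threads on a core share one set of TLB state.
 */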
#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(struct paca_struct *boot_paca)
{
	/* The boot cpu is started */
	boot_paca->cpu_start = 1;
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Give the early boot machine check stack somewhere to use, use
	 * half of the init stack. This is a bit hacky but there should not be
	 * deep stack usage in early init so shouldn't overflow it or overwrite
	 * things.
	 */
	boot_paca->mc_emergency_sp = (void *)&init_thread_union +
		(THREAD_SIZE/2);
#endif
	/* Allow percpu accesses to work until we set up percpu data */
	boot_paca->data_offset = 0;
	/* Mark interrupts soft and hard disabled in PACA */
	boot_paca->irq_soft_mask = IRQS_DISABLED;
	boot_paca->irq_happened = PACA_IRQ_HARD_DIS;
	WARN_ON(mfmsr() & MSR_EE);
}

static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/*
		 * - PR KVM does not support AIL mode interrupts in the host
		 *   while a PR guest is running.
		 *
		 * - SCV system call interrupt vectors are only implemented for
		 *   AIL mode interrupts.
		 *
		 * - On pseries, AIL mode can only be enabled and disabled
		 *   system-wide so when a PR VM is created on a pseries host,
		 *   all CPUs of the host are set to AIL=0 mode.
		 *
		 * - Therefore host CPUs must not execute scv while a PR VM
		 *   exists.
		 *
		 * - SCV support can not be disabled dynamically because the
		 *   feature is advertised to host userspace. Disabling the
		 *   facility and emulating it would be possible but is not
		 *   implemented.
		 *
		 * - So SCV support is blanket disabled if PR KVM could possibly
		 *   run. That is, PR support compiled in, booting on pseries
		 *   with hash MMU.
		 */
		if (IS_ENABLED(CONFIG_KVM_BOOK3S_PR_POSSIBLE) && !radix_enabled()) {
			init_task.thread.fscr &= ~FSCR_SCV;
			cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
		}

		/* Enable AIL if possible */
		if (!pseries_enable_reloc_on_exc()) {
			init_task.thread.fscr &= ~FSCR_SCV;
			cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
		}

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		unsigned long new_lpcr = lpcr;

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			/* P10 DD1 does not have HAIL */
			if (pvr_version_is(PVR_POWER10) &&
					(mfspr(SPRN_PVR) & 0xf00) == 0x100)
				new_lpcr |= LPCR_AIL_3;
			else
				new_lpcr |= LPCR_HAIL;
		} else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
			new_lpcr |= LPCR_AIL_3;
		}

		if (new_lpcr != lpcr)
			mtspr(SPRN_LPCR, new_lpcr);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

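/*
 * DSCR value found at boot; recorded by record_spr_defaults() below and
 * used later as the initial system-wide default.
 */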
unsigned long spr_default_dscr = 0;

static void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/*
	 * Assume we're on cpu 0 for now.
	 *
	 * We need to load a PACA very early for a few reasons.
	 *
	 * The stack protector canary is stored in the paca, so as soon as we
	 * call any stack protected code we need r13 pointing somewhere valid.
	 *
	 * If we are using kcov it will call in_task() in its instrumentation,
	 * which relies on the current task from the PACA.
	 *
	 * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
	 * printk(), which can trigger both stack protector and kcov.
	 *
	 * percpu variables and spin locks also use the paca.
	 *
	 * So set up a temporary paca. It will be replaced below once we know
	 * what CPU we are on.
	 */
	initialise_paca(&boot_paca, 0);
	fixup_boot_paca(&boot_paca);
	WARN_ON(local_paca != 0);
	setup_paca(&boot_paca); /* install the paca into registers */

	/* -------- printk is now safe to use ------- */

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && (mfmsr() & MSR_HV))
		enable_machine_check();

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	fixup_boot_paca(paca_ptrs[boot_cpuid]);
	setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */
	// smp_processor_id() now reports boot_cpuid

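	/*
	 * init_task's thread_info was compile-time initialised with cpu = 0,
	 * so task_cpu(current) would still report 0 here. Correct it to
	 * match smp_processor_id() when the boot cpuid is non-zero (e.g. a
	 * kdump kernel entered on a secondary thread) -- this is the fix in
	 * the commit named in the page title.
	 */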
#ifdef CONFIG_SMP
	task_thread_info(current)->cpu = boot_cpuid; // fix task_cpu(current)
#endif

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/*
	 * Configure Kernel Userspace Protection. This needs to happen before
	 * feature fixups for platforms that implement this using features.
	 */
	setup_kup();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	early_ioremap_setup();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	udbg_printf(" <- %s()\n", __func__);

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above udbg_printf() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/* Perform any KUP setup that is per-cpu */
	setup_kup();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

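/*
 * Called from panic() when another CPU already holds the panic: park
 * this CPU with interrupts hard disabled, spinning at low priority.
 */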
void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

static void __init init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
				   u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}

static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep == NULL)
		lsizep = bsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,  128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000, 128, 128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000, 128, 0,   512);
		init_cache_info(&ppc64_caches.l3, 0x800000, 128, 0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			pr_warn("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			pr_warn("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers that must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E_64
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

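/*
 * Allocate one THREAD_SIZE stack below @limit, preferring memory on
 * @cpu's node; boot fails outright if the allocation is impossible.
 */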
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	void *ptr;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_ALIGN,
				     MEMBLOCK_LOW_LIMIT, limit,
				     early_cpu_to_node(cpu));
	if (!ptr)
		panic("cannot allocate stacks");

	return ptr;
}

void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}

#ifdef CONFIG_PPC_BOOK3E_64
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit, mce_limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Machine check on pseries calls rtas, but can't use the static
	 * rtas_args due to a machine check hitting while the lock is held.
	 * rtas args have to be under 4GB, so the machine check stack is
	 * limited to 4GB so args can be put on stack.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
		mce_limit = SZ_4G;

	for_each_possible_cpu(i) {
		paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
#endif
	}
}

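/*
 * On ppc64 the per-cpu offset is read through the paca (data_offset),
 * so setup_per_cpu_areas() below mirrors __per_cpu_offset[] into each
 * CPU's paca.
 */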
#ifdef CONFIG_SMP
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

static __init int pcpu_cpu_to_node(int cpu)
{
	return early_cpu_to_node(cpu);
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	/*
	 * BookE and BookS radix are historical values and should be revisited.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
		atom_size = SZ_1M;
	} else if (radix_enabled()) {
		atom_size = PAGE_SIZE;
	} else if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) {
		/*
		 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
		 * to group units. For larger mappings, use 1M atom which
		 * should be large enough to contain a number of units.
		 */
		if (mmu_linear_psize == MMU_PAGE_4K)
			atom_size = PAGE_SIZE;
		else
			atom_size = SZ_1M;
	}

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
					    pcpu_cpu_to_node);
		if (rc)
			pr_warn("PERCPU: %s allocator failed (%d), "
				"falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}

	if (rc < 0)
		rc = pcpu_page_first_chunk(0, pcpu_cpu_to_node);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif

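/*
 * Memory hotplug block size: use the platform-specific hook when one is
 * registered, otherwise the generic minimum block size.
 */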
#ifdef CONFIG_MEMORY_HOTPLUG
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

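/*
 * Sample period for the perf-based hardlockup detector: watchdog_thresh
 * seconds expressed in processor clock ticks.
 */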
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif

/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in KVM guests, so disable it there
 * by default too. PowerVM will not stop or arbitrarily oversubscribe
 * CPUs, but gives a minimum regular allotment even with SPLPAR, so enable
 * the detector for non-KVM guests, assuming PowerVM.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		if (is_kvm_guest())
			hardlockup_detector_disable();
	}
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);