arch/powerpc/kernel/setup_64.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

#include "setup.h"

int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);

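/*
 * On Book3E, the threads of a core share one tlb_core_data structure;
 * point each thread's paca at the copy owned by the first thread of
 * its core.
 */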
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}

static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/*
	 * Configure Kernel Userspace Protection. This needs to happen before
	 * feature fixups for platforms that implement this using features.
	 */
	setup_kup();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	udbg_printf(" <- %s()\n", __func__);

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above udbg_printf() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/* Perform any KUP setup that is per-cpu */
	setup_kup();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

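/*
 * Secondary CPU release: on platforms that park secondaries in the
 * __secondary_hold spin loop (see head_64.S), publish the address they
 * should branch to and give them a moment to move on to their
 * individual paca spinloops.
 */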
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}

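/*
 * Parse the i-cache or d-cache properties of one device-tree node into
 * @info. Illustrative example (not taken from any particular machine):
 * d-cache-size = 0x8000, d-cache-sets = 64 and d-cache-line-size = 128
 * yield info->assoc = 0x8000 / (64 * 128) = 4. Returns false if the
 * size, block-size or line-size property is missing.
 */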
static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize   blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128,  128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128,  128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128,  0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128,  0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			pr_warn("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			pr_warn("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers that must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

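/*
 * Allocate a THREAD_SIZE-aligned stack from memblock, below @limit and
 * preferably on @cpu's NUMA node; panics if the allocation fails.
 */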
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	void *ptr;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE,
				     MEMBLOCK_LOW_LIMIT, limit,
				     early_cpu_to_node(cpu));
	if (!ptr)
		panic("cannot allocate stacks");

	return ptr;
}

void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}

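/*
 * Book3E exception-level stacks: allocate per-cpu critical, debug and
 * machine check stacks and hook them into each CPU's paca.
 */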
#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      early_cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif

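/*
 * Size of the memory blocks exposed to the memory hotplug code; use the
 * platform-provided hook when there is one.
 */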
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif

/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);

#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

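/*
 * Patch the selected flush sequence in (or out), then run a dummy
 * function on every CPU so each one takes a kernel entry/exit and
 * picks up the newly patched return path.
 */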
void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}

static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns to
	 * reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
						l1d_size, MEMBLOCK_LOW_LIMIT,
						limit, NUMA_NO_NODE);
	if (!l1d_flush_fallback_area)
		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
		      __func__, l1d_size * 2, l1d_size, &limit);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!no_rfi_flush && !cpu_mitigations_off())
		rfi_flush_enable(enable);
}

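/*
 * Runtime control via debugfs: writing 0/1 to the rfi_flush file
 * (typically /sys/kernel/debug/powerpc/rfi_flush) toggles the flush.
 */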
#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */