mm: remove include/linux/bootmem.h
arch/powerpc/kernel/setup_64.c
/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>

#include "setup.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}

static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
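/*
 * Whether secondary CPUs are expected to be waiting in the early
 * __secondary_hold spinloop, and so need releasing in smp_release_cpus().
 */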
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}

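/*
 * Fill @info from the "i-cache-*" or "d-cache-*" properties of the given
 * CPU node. Returns false if the size, block-size or line-size property is
 * missing; the info is still filled in from fallback values in that case.
 */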
static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,  128, 128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000, 128, 128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000, 128, 0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000,128, 0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;

	DBG(" <- initialize_cache_info()\n");
}

/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers that must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

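/*
 * Allocate a THREAD_SIZE-aligned stack below @limit, preferring memory on
 * the node of @cpu and falling back to any node (and ultimately panicking)
 * if that fails.
 */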
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	unsigned long pa;

	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
					early_cpu_to_node(cpu), MEMBLOCK_NONE);
	if (!pa) {
		pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		if (!pa)
			panic("cannot allocate stacks");
	}

	return __va(pa);
}

void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}

#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Emergency stacks are used for a range of things, from asynchronous
 * NMIs (system reset, machine check) to synchronous, process context.
 * We set preempt_count to zero, even though that isn't necessarily correct. To
 * get the right value we'd need to copy it from the previous thread_info, but
 * doing that might fault causing more problems.
 * TODO: what to do with accounting?
 */
static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
{
	ti->task = NULL;
	ti->cpu = cpu;
	ti->preempt_count = 0;
	ti->local_flags = 0;
	ti->flags = 0;
	klp_init_thread_info(ti);
}

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;

		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

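/*
 * Allocation callbacks for pcpu_embed_first_chunk(): carve per-cpu memory
 * out of memblock, preferring the node of the owning CPU.
 */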
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      early_cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
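/*
 * Perf sample period for the hardlockup detector: watchdog_thresh seconds
 * worth of processor cycles.
 */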
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif

/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);

#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.\n");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

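/*
 * Patch the kernel exit paths for the requested flush type (or no flush).
 * When enabling, also interrupt every CPU once so that each takes an
 * interrupt return through the newly patched exit path.
 */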
void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}

static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns to
	 * reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}

void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!no_rfi_flush)
		rfi_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
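/* Debugfs knob ("rfi_flush") for flipping the RFI flush at runtime. */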
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */