arch/powerpc/kernel/setup_64.c

/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define DEBUG

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int spinning_secondaries;
u64 ppc64_pft_size;

/* Pick defaults since we might want to patch instructions
 * before we've read this from the device tree.
 */
struct ppc64_caches ppc64_caches = {
	.dline_size = 0x40,
	.log_dline_size = 6,
	.iline_size = 0x40,
	.log_iline_size = 6
};
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;

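/*
 * Note: these feed the AT_DCACHEBSIZE, AT_ICACHEBSIZE and AT_UCACHEBSIZE
 * aux vector entries; ucache_bsize is only meaningful for CPUs with a
 * unified cache, so it stays zero here on ppc64.
 */
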
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

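	/*
	 * The lock is assumed to sit at offset 0 of struct tlb_core_data
	 * so that low-level code can use tcd_ptr directly as the lock
	 * address; the assertion below guards that layout.
	 */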
	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca[cpu].tcd_ptr = &paca[first].tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 */
		if (smt_enabled_at_boot >= 2 &&
		    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
		    book3e_htw_mode != PPC_HTW_E6500) {
			/* Should we panic instead? */
			WARN_ONCE(1, "%s: unsupported MMU configuration -- expect problems\n",
				  __func__);
		}
	}
}
#endif

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

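/*
 * The early_param() callback below runs very early in boot, before
 * threads_per_core has been determined from the device tree, so it only
 * records the string; check_smt_enabled() above does the parsing later.
 */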
/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */

/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we set up percpu data */
	get_paca()->data_offset = 0;
}

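/*
 * Configure exception handling for the platform: kdump trampolines,
 * exception endianness under a PAPR hypervisor, and the Alternate
 * Interrupt Location (AIL) mode where supported.
 */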
static void __init configure_exceptions(void)
{
	/*
	 * Set up the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* Enable AIL if supported, and we are in hypervisor mode */
		if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
		    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
			unsigned long lpcr = mfspr(SPRN_LPCR);
			mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
		}
	}
}

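/*
 * kernel_msr is the MSR value the exception code establishes for kernel
 * mode; MSR_KERNEL has IR and DR set, so from here on exceptions taken
 * by this CPU run with the MMU on.
 */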
static void cpu_ready_for_interrupts(void)
{
	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended, be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, set up the paca. */
	setup_paca(&paca[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
static bool use_spinloop(void)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
		return true;

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

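	/*
	 * Note: for a kernel linked to run at a non-zero physical address
	 * (e.g. kdump), the secondaries spin in the copy of this code that
	 * sits in low memory, so subtracting PHYSICAL_START is assumed to
	 * point at the spinloop word in that low-memory image.
	 */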
	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC */

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland
 */
void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for_each_node_by_type(np, "cpu") {
		num_cpus += 1;

		/*
		 * We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if (num_cpus == 1) {
			const __be32 *sizep, *lsizep;
			u32 size, lsize;

			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = of_get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "d-cache-block-size",
						 NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "d-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.dsize = size;
			ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = of_get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = be32_to_cpu(*sizep);
			lsizep = of_get_property(np, "i-cache-block-size",
						 NULL);
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "i-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = be32_to_cpu(*lsizep);
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.isize = size;
			ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	DBG(" <- initialize_cache_info()\n");
}

/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static __init u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}

void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
	}
}

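/*
 * Book3E has separate critical, debug and machine check exception
 * levels; each needs its own per-cpu stack since such exceptions can
 * interrupt the kernel at almost any point.
 */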
#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;

	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
	}

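	/*
	 * On CPUs that take the debug interrupt at a dedicated (debug)
	 * exception level, point the 0x040 vector at the debug-level
	 * handler.
	 */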
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for machine check exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

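/*
 * Set up the first-chunk percpu allocator and record each CPU's offset
 * both in __per_cpu_offset[] and in its paca, so that low-level code
 * can reach percpu data through the paca.
 */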
void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
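/*
 * Sample period for the PMU-based hardlockup detector: processor
 * cycles (ppc_proc_freq is in Hz) over watchdog_thresh seconds.
 */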
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}

/*
 * The hardlockup detector breaks PMU event based branches and is likely
 * to get false positives in KVM guests, so disable it by default.
 */
static int __init disable_hardlockup_detector(void)
{
	hardlockup_detector_disable();

	return 0;
}
early_initcall(disable_hardlockup_detector);
#endif