/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>

static int num_standard_resources;
static struct resource *standard_resources;

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
	 * using a percpu variable early; for example, lockdep will access
	 * a percpu variable inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}

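/*
 * Worked example (value hypothetical): MPIDR_HWID_BITMASK strips the
 * non-affinity bits of MPIDR_EL1 (the RES1 bit 31 and the U and MT
 * bits), so a raw MPIDR_EL1 value of 0x80000001 would be recorded in
 * cpu_logical_map(0) as HWID 0x1.
 */
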
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32-bit value space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
	 * hash, though not minimal, since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.shift_aff[3],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
}
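
/*
 * A worked example with hypothetical MPIDR_EL1 values {0x000, 0x001,
 * 0x100, 0x101} (two clusters of two CPUs each): the XOR pre-scan gives
 * mask = 0x101, hence fs[0] = fs[1] = 0 and bits[0] = bits[1] = 1, so
 * shift_aff[0] = 0 and shift_aff[1] = 8 - 1 = 7. The resulting hash,
 *
 *	index = (mpidr & 0x001) | ((mpidr & 0x100) >> 7),
 *
 * maps the four MPIDRs onto the dense, collision-free range 0..3.
 */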

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	void *dt_virt = fixmap_remap_fdt(dt_phys);
	const char *name;

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	name = of_flat_dt_get_machine_name();
	if (!name)
		return;

	pr_info("Machine model: %s\n", name);
	dump_stack_set_arch_desc("%s (DT)", name);
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;
	size_t res_size;

	kernel_code.start = __pa_symbol(_text);
	kernel_code.end = __pa_symbol(__init_begin - 1);
	kernel_data.start = __pa_symbol(_sdata);
	kernel_data.end = __pa_symbol(_end - 1);

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

	for_each_memblock(memory, region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
#ifdef CONFIG_KEXEC_CORE
		/* Userspace will find the "Crash kernel" region in /proc/iomem. */
		if (crashk_res.end && crashk_res.start >= res->start &&
		    crashk_res.end <= res->end)
			request_resource(res, &crashk_res);
#endif
	}
}
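
/*
 * For illustration only (addresses hypothetical): on a machine with one
 * "System RAM" memblock region, the resource tree built above appears
 * in /proc/iomem as nested entries, e.g.:
 *
 *	80000000-ffffffff : System RAM
 *	  80080000-80c7ffff : Kernel code
 *	  80e00000-811fffff : Kernel data
 */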

static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_region(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

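/*
 * A worked example of the clamping above, with hypothetical addresses
 * and 4 KiB pages: a reserved memblock range [0x80001234, 0x80002345]
 * is widened by PFN_DOWN()/PFN_UP() to the page-aligned span
 * [0x80001000, 0x80002fff], intersected with the parent resource, and
 * then carved out via reserve_region_with_split() as "reserved".
 */
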
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	parse_early_param();

	/*
	 * Unmask asynchronous aborts and FIQ after bringing up a possible
	 * earlycon. (Report possible System Errors once we can report that
	 * they occurred.)
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	xen_early_init();
	efi_init();
	arm64_memblock_init();

	paging_init();

	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();

	cpu_read_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

	/* Init percpu seeds for random tags after CPUs are set up. */
	kasan_init_tags();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#endif

#ifdef CONFIG_VT
	conswitchp = &dummy_con;
#endif
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

/*
 * Dump out kernel offset information on panic.
 */
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
			      void *p)
{
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
		pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
	return 0;
}

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);