tile: support FRAME_POINTER
arch/tile/kernel/setup.c (linux-2.6-block.git)
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mmzone.h>
18#include <linux/bootmem.h>
19#include <linux/module.h>
20#include <linux/node.h>
21#include <linux/cpu.h>
22#include <linux/ioport.h>
 23#include <linux/irq.h>
 24#include <linux/kexec.h>
 25#include <linux/pci.h>
 26#include <linux/swiotlb.h>
 27#include <linux/initrd.h>
 28#include <linux/io.h>
 29#include <linux/highmem.h>
 30#include <linux/smp.h>
 31#include <linux/timex.h>
 32#include <linux/hugetlb.h>
 33#include <linux/start_kernel.h>
 34#include <linux/screen_info.h>
 35#include <asm/setup.h>
 36#include <asm/sections.h>
37#include <asm/cacheflush.h>
38#include <asm/pgalloc.h>
39#include <asm/mmu_context.h>
40#include <hv/hypervisor.h>
41#include <arch/interrupts.h>
42
43/* <linux/smp.h> doesn't provide this definition. */
44#ifndef CONFIG_SMP
45#define setup_max_cpus 1
46#endif
47
48static inline int ABS(int x) { return x >= 0 ? x : -x; }
49
50/* Chip information */
51char chip_model[64] __write_once;
52
53#ifdef CONFIG_VT
54struct screen_info screen_info;
55#endif
56
57struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
58EXPORT_SYMBOL(node_data);
59
60/* Information on the NUMA nodes that we compute early */
61unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
62unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
63unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
64unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
65unsigned long __initdata node_free_pfn[MAX_NUMNODES];
66
67static unsigned long __initdata node_percpu[MAX_NUMNODES];
68
69/*
70 * per-CPU stack and boot info.
71 */
72DEFINE_PER_CPU(unsigned long, boot_sp) =
73 (unsigned long)init_stack + THREAD_SIZE;
74
75#ifdef CONFIG_SMP
76DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
77#else
78/*
79 * The variable must be __initdata since it references __init code.
80 * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
81 */
82unsigned long __initdata boot_pc = (unsigned long)start_kernel;
83#endif
84
85#ifdef CONFIG_HIGHMEM
86/* Page frame index of end of lowmem on each controller. */
87unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];
88
89/* Number of pages that can be mapped into lowmem. */
90static unsigned long __initdata mappable_physpages;
91#endif
92
93/* Data on which physical memory controller corresponds to which NUMA node */
94int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
95
96#ifdef CONFIG_HIGHMEM
97/* Map information from VAs to PAs */
98unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
99 __write_once __attribute__((aligned(L2_CACHE_BYTES)));
100EXPORT_SYMBOL(pbase_map);
101
102/* Map information from PAs to VAs */
103void *vbase_map[NR_PA_HIGHBIT_VALUES]
104 __write_once __attribute__((aligned(L2_CACHE_BYTES)));
105EXPORT_SYMBOL(vbase_map);
106#endif
107
108/* Node number as a function of the high PA bits */
109int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
110EXPORT_SYMBOL(highbits_to_node);
111
112static unsigned int __initdata maxmem_pfn = -1U;
113static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
114 [0 ... MAX_NUMNODES-1] = -1U
115};
116static nodemask_t __initdata isolnodes;
117
 118#if defined(CONFIG_PCI) && !defined(__tilegx__)
119enum { DEFAULT_PCI_RESERVE_MB = 64 };
120static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
121unsigned long __initdata pci_reserve_start_pfn = -1U;
122unsigned long __initdata pci_reserve_end_pfn = -1U;
123#endif
124
125static int __init setup_maxmem(char *str)
126{
 127 unsigned long long maxmem;
 128 if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
 129 return -EINVAL;
 130
 131 maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
 132 pr_info("Forcing RAM used to no more than %dMB\n",
133 maxmem_pfn >> (20 - PAGE_SHIFT));
134 return 0;
135}
136early_param("maxmem", setup_maxmem);
137
138static int __init setup_maxnodemem(char *str)
139{
140 char *endp;
 141 unsigned long long maxnodemem;
 142 long node;
 143
 144 node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
 145 if (node >= MAX_NUMNODES || *endp != ':')
 146 return -EINVAL;
 147
 148 maxnodemem = memparse(endp+1, NULL);
 149 maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
 150 (HPAGE_SHIFT - PAGE_SHIFT);
 151 pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
152 node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
153 return 0;
154}
155early_param("maxnodemem", setup_maxnodemem);
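Both options go through memparse(), so the usual K/M/G suffixes work; maxnodemem additionally takes a leading controller number and a colon. An illustrative boot line (hypothetical values, not from this file) that caps total RAM at 1G and node 1 at 256M:

    maxmem=1G maxnodemem=1:256M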
156
157struct memmap_entry {
158 u64 addr; /* start of memory segment */
159 u64 size; /* size of memory segment */
160};
161static struct memmap_entry memmap_map[64];
162static int memmap_nr;
163
164static void add_memmap_region(u64 addr, u64 size)
165{
166 if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
167 pr_err("Ooops! Too many entries in the memory map!\n");
168 return;
169 }
170 memmap_map[memmap_nr].addr = addr;
171 memmap_map[memmap_nr].size = size;
172 memmap_nr++;
173}
174
175static int __init setup_memmap(char *p)
176{
177 char *oldp;
178 u64 start_at, mem_size;
179
180 if (!p)
181 return -EINVAL;
182
183 if (!strncmp(p, "exactmap", 8)) {
184 pr_err("\"memmap=exactmap\" not valid on tile\n");
185 return 0;
186 }
187
188 oldp = p;
189 mem_size = memparse(p, &p);
190 if (p == oldp)
191 return -EINVAL;
192
193 if (*p == '@') {
194 pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
195 } else if (*p == '#') {
196 pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
197 } else if (*p == '$') {
198 start_at = memparse(p+1, &p);
199 add_memmap_region(start_at, mem_size);
200 } else {
201 if (mem_size == 0)
202 return -EINVAL;
203 maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
204 (HPAGE_SHIFT - PAGE_SHIFT);
205 }
206 return *p == '\0' ? 0 : -EINVAL;
207}
208early_param("memmap", setup_memmap);
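Of the x86-style forms, only "memmap=nn$ss" (mark nn bytes starting at physical address ss as reserved) is honored here; the '@' and '#' variants are rejected, and a bare "memmap=nn" behaves like maxmem. An illustrative example (hypothetical address) reserving 16M at PA 0x40000000:

    memmap=16M$0x40000000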
209
210static int __init setup_mem(char *str)
211{
212 return setup_maxmem(str);
213}
214early_param("mem", setup_mem); /* compatibility with x86 */
215
216static int __init setup_isolnodes(char *str)
217{
218 char buf[MAX_NUMNODES * 5];
219 if (str == NULL || nodelist_parse(str, isolnodes) != 0)
220 return -EINVAL;
221
222 nodelist_scnprintf(buf, sizeof(buf), isolnodes);
 223 pr_info("Set isolnodes value to '%s'\n", buf);
224 return 0;
225}
226early_param("isolnodes", setup_isolnodes);
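The argument is an ordinary node list, so commas and ranges are accepted; e.g. an illustrative "isolnodes=1-3" keeps controllers 1 through 3 out of the default NUMA assignment and, on HIGHMEM builds, gives them no lowmem mapping.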
227
 228#if defined(CONFIG_PCI) && !defined(__tilegx__)
229static int __init setup_pci_reserve(char* str)
230{
231 unsigned long mb;
232
233 if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
234 mb > 3 * 1024)
235 return -EINVAL;
236
237 pci_reserve_mb = mb;
 238 pr_info("Reserving %dMB for PCIE root complex mappings\n",
 239 pci_reserve_mb);
240 return 0;
241}
242early_param("pci_reserve", setup_pci_reserve);
243#endif
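The value is a plain number of megabytes (at most 3072); an illustrative "pci_reserve=128" sets aside a 128MB window just below 4GB for PCIe root-complex mappings instead of the default 64MB (see the pci_reserve_*_pfn computation in setup_arch()).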
244
245#ifndef __tilegx__
246/*
247 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
248 * This can be used to increase (or decrease) the vmalloc area.
249 */
250static int __init parse_vmalloc(char *arg)
251{
252 if (!arg)
253 return -EINVAL;
254
255 VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;
256
257 /* See validate_va() for more on this test. */
258 if ((long)_VMALLOC_START >= 0)
259 early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
260 VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);
261
262 return 0;
263}
264early_param("vmalloc", parse_vmalloc);
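The size goes through memparse() and is rounded up to a PGDIR_SIZE multiple; an illustrative "vmalloc=512M" requests a 512MB vmalloc arena. The check above rejects values so large that _VMALLOC_START would cross the 2GB line, since kernel PCs must keep the high bit set (see validate_va()).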
265#endif
266
267#ifdef CONFIG_HIGHMEM
268/*
 269 * Determine for each controller where its lowmem is mapped and how much of
 270 * it is mapped there.  On controller zero, the first few megabytes are
 271 * already mapped in as code at MEM_SV_START, so in principle we could
 272 * start our data mappings higher up, but for now we don't bother, to avoid
 273 * additional confusion.
274 *
275 * One question is whether, on systems with more than 768 Mb and
276 * controllers of different sizes, to map in a proportionate amount of
277 * each one, or to try to map the same amount from each controller.
278 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
279 * respectively, do we map 256MB from each, or do we map 128 MB, 512
280 * MB, and 128 MB respectively?) For now we use a proportionate
281 * solution like the latter.
282 *
283 * The VA/PA mapping demands that we align our decisions at 16 MB
284 * boundaries so that we can rapidly convert VA to PA.
285 */
286static void *__init setup_pa_va_mapping(void)
287{
288 unsigned long curr_pages = 0;
289 unsigned long vaddr = PAGE_OFFSET;
290 nodemask_t highonlynodes = isolnodes;
291 int i, j;
292
293 memset(pbase_map, -1, sizeof(pbase_map));
294 memset(vbase_map, -1, sizeof(vbase_map));
295
296 /* Node zero cannot be isolated for LOWMEM purposes. */
297 node_clear(0, highonlynodes);
298
299 /* Count up the number of pages on non-highonlynodes controllers. */
300 mappable_physpages = 0;
301 for_each_online_node(i) {
302 if (!node_isset(i, highonlynodes))
303 mappable_physpages +=
304 node_end_pfn[i] - node_start_pfn[i];
305 }
306
307 for_each_online_node(i) {
308 unsigned long start = node_start_pfn[i];
309 unsigned long end = node_end_pfn[i];
310 unsigned long size = end - start;
311 unsigned long vaddr_end;
312
313 if (node_isset(i, highonlynodes)) {
314 /* Mark this controller as having no lowmem. */
315 node_lowmem_end_pfn[i] = start;
316 continue;
317 }
318
319 curr_pages += size;
320 if (mappable_physpages > MAXMEM_PFN) {
321 vaddr_end = PAGE_OFFSET +
322 (((u64)curr_pages * MAXMEM_PFN /
323 mappable_physpages)
324 << PAGE_SHIFT);
325 } else {
326 vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
327 }
328 for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
329 unsigned long this_pfn =
330 start + (j << HUGETLB_PAGE_ORDER);
331 pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
332 if (vbase_map[__pfn_to_highbits(this_pfn)] ==
333 (void *)-1)
334 vbase_map[__pfn_to_highbits(this_pfn)] =
335 (void *)(vaddr & HPAGE_MASK);
336 }
337 node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
338 BUG_ON(node_lowmem_end_pfn[i] > end);
339 }
340
341 /* Return highest address of any mapped memory. */
342 return (void *)vaddr;
343}
344#endif /* CONFIG_HIGHMEM */
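The two arrays filled in above are what make the lowmem VA/PA conversion constant-time: pbase_map[] is indexed by the huge-page number of a virtual address and holds the first PFN mapped there, while vbase_map[] records the virtual base chosen for each value of the high PA bits. A minimal sketch of the forward direction (illustrative only; the helper name is made up, and the real __pa()/__va() conversions are defined elsewhere in the arch headers):

static inline unsigned long long example_lowmem_va_to_pa(unsigned long va)
{
	/* First PFN of the huge page backing this lowmem VA... */
	unsigned long pfn = pbase_map[va >> HPAGE_SHIFT];

	/* ...plus the offset within that huge page. */
	return ((unsigned long long)pfn << PAGE_SHIFT) + (va & (HPAGE_SIZE - 1));
}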
345
346/*
347 * Register our most important memory mappings with the debug stub.
348 *
349 * This is up to 4 mappings for lowmem, one mapping per memory
350 * controller, plus one for our text segment.
351 */
 352static void __cpuinit store_permanent_mappings(void)
353{
354 int i;
355
356 for_each_online_node(i) {
357 HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
358#ifdef CONFIG_HIGHMEM
359 HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
360#else
361 HV_PhysAddr high_mapped_pa = node_end_pfn[i];
362#endif
363
364 unsigned long pages = high_mapped_pa - node_start_pfn[i];
365 HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
366 hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
367 }
368
369 hv_store_mapping((HV_VirtAddr)_text,
370 (uint32_t)(_einittext - _text), 0);
371}
372
373/*
374 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
375 * and node_online_map, doing suitable sanity-checking.
376 * Also set min_low_pfn, max_low_pfn, and max_pfn.
377 */
378static void __init setup_memory(void)
379{
380 int i, j;
381 int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
382#ifdef CONFIG_HIGHMEM
383 long highmem_pages;
384#endif
385#ifndef __tilegx__
386 int cap;
387#endif
388#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
389 long lowmem_pages;
390#endif
 391 unsigned long physpages = 0;
392
393 /* We are using a char to hold the cpu_2_node[] mapping */
 394 BUILD_BUG_ON(MAX_NUMNODES > 127);
395
396 /* Discover the ranges of memory available to us */
397 for (i = 0; ; ++i) {
398 unsigned long start, size, end, highbits;
399 HV_PhysAddrRange range = hv_inquire_physical(i);
400 if (range.size == 0)
401 break;
402#ifdef CONFIG_FLATMEM
403 if (i > 0) {
 404 pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
405 range.size, range.start + range.size);
406 continue;
407 }
408#endif
409#ifndef __tilegx__
410 if ((unsigned long)range.start) {
 411 pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
412 range.start, range.start + range.size);
413 continue;
414 }
415#endif
416 if ((range.start & (HPAGE_SIZE-1)) != 0 ||
417 (range.size & (HPAGE_SIZE-1)) != 0) {
418 unsigned long long start_pa = range.start;
 419 unsigned long long orig_size = range.size;
420 range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
421 range.size -= (range.start - start_pa);
422 range.size &= HPAGE_MASK;
 423 pr_err("Range not hugepage-aligned: %#llx..%#llx:"
 424 " now %#llx-%#llx\n",
 425 start_pa, start_pa + orig_size,
426 range.start, range.start + range.size);
427 }
428 highbits = __pa_to_highbits(range.start);
429 if (highbits >= NR_PA_HIGHBIT_VALUES) {
 430 pr_err("PA high bits too high: %#llx..%#llx\n",
431 range.start, range.start + range.size);
432 continue;
433 }
434 if (highbits_seen[highbits]) {
 435 pr_err("Range overlaps in high bits: %#llx..%#llx\n",
436 range.start, range.start + range.size);
437 continue;
438 }
439 highbits_seen[highbits] = 1;
440 if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
441 int max_size = maxnodemem_pfn[i];
442 if (max_size > 0) {
443 pr_err("Maxnodemem reduced node %d to"
444 " %d pages\n", i, max_size);
445 range.size = PFN_PHYS(max_size);
 446 } else {
 447 pr_err("Maxnodemem disabled node %d\n", i);
448 continue;
449 }
450 }
451 if (physpages + PFN_DOWN(range.size) > maxmem_pfn) {
452 int max_size = maxmem_pfn - physpages;
453 if (max_size > 0) {
454 pr_err("Maxmem reduced node %d to %d pages\n",
455 i, max_size);
456 range.size = PFN_PHYS(max_size);
 457 } else {
 458 pr_err("Maxmem disabled node %d\n", i);
459 continue;
460 }
461 }
462 if (i >= MAX_NUMNODES) {
 463 pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
464 i, range.size, range.size + range.start);
465 continue;
466 }
467
468 start = range.start >> PAGE_SHIFT;
469 size = range.size >> PAGE_SHIFT;
470 end = start + size;
471
472#ifndef __tilegx__
473 if (((HV_PhysAddr)end << PAGE_SHIFT) !=
474 (range.start + range.size)) {
 475 pr_err("PAs too high to represent: %#llx..%#llx\n",
476 range.start, range.start + range.size);
477 continue;
478 }
479#endif
 480#if defined(CONFIG_PCI) && !defined(__tilegx__)
481 /*
482 * Blocks that overlap the pci reserved region must
483 * have enough space to hold the maximum percpu data
484 * region at the top of the range. If there isn't
485 * enough space above the reserved region, just
486 * truncate the node.
487 */
488 if (start <= pci_reserve_start_pfn &&
489 end > pci_reserve_start_pfn) {
490 unsigned int per_cpu_size =
491 __per_cpu_end - __per_cpu_start;
492 unsigned int percpu_pages =
493 NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
494 if (end < pci_reserve_end_pfn + percpu_pages) {
495 end = pci_reserve_start_pfn;
 496 pr_err("PCI mapping region reduced node %d to"
497 " %ld pages\n", i, end - start);
498 }
499 }
500#endif
501
502 for (j = __pfn_to_highbits(start);
503 j <= __pfn_to_highbits(end - 1); j++)
504 highbits_to_node[j] = i;
505
506 node_start_pfn[i] = start;
507 node_end_pfn[i] = end;
508 node_controller[i] = range.controller;
 509 physpages += size;
510 max_pfn = end;
511
512 /* Mark node as online */
513 node_set(i, node_online_map);
514 node_set(i, node_possible_map);
515 }
516
517#ifndef __tilegx__
518 /*
519 * For 4KB pages, mem_map "struct page" data is 1% of the size
520 * of the physical memory, so can be quite big (640 MB for
521 * four 16G zones). These structures must be mapped in
522 * lowmem, and since we currently cap out at about 768 MB,
523 * it's impractical to try to use this much address space.
524 * For now, arbitrarily cap the amount of physical memory
525 * we're willing to use at 8 million pages (32GB of 4KB pages).
526 */
527 cap = 8 * 1024 * 1024; /* 8 million pages */
 528 if (physpages > cap) {
529 int num_nodes = num_online_nodes();
530 int cap_each = cap / num_nodes;
531 unsigned long dropped_pages = 0;
532 for (i = 0; i < num_nodes; ++i) {
533 int size = node_end_pfn[i] - node_start_pfn[i];
534 if (size > cap_each) {
535 dropped_pages += (size - cap_each);
536 node_end_pfn[i] = node_start_pfn[i] + cap_each;
537 }
538 }
 539 physpages -= dropped_pages;
 540 pr_warning("Only using %ldMB memory;"
 541 " ignoring %ldMB.\n",
 542 physpages >> (20 - PAGE_SHIFT),
 543 dropped_pages >> (20 - PAGE_SHIFT));
 544 pr_warning("Consider using a larger page size.\n");
545 }
546#endif
547
548 /* Heap starts just above the last loaded address. */
549 min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);
550
551#ifdef CONFIG_HIGHMEM
552 /* Find where we map lowmem from each controller. */
553 high_memory = setup_pa_va_mapping();
554
555 /* Set max_low_pfn based on what node 0 can directly address. */
556 max_low_pfn = node_lowmem_end_pfn[0];
557
558 lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
559 MAXMEM_PFN : mappable_physpages;
 560 highmem_pages = (long) (physpages - lowmem_pages);
 561
 562 pr_notice("%ldMB HIGHMEM available.\n",
 563 pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
 564 pr_notice("%ldMB LOWMEM available.\n",
565 pages_to_mb(lowmem_pages));
566#else
567 /* Set max_low_pfn based on what node 0 can directly address. */
568 max_low_pfn = node_end_pfn[0];
569
570#ifndef __tilegx__
571 if (node_end_pfn[0] > MAXMEM_PFN) {
 572 pr_warning("Only using %ldMB LOWMEM.\n",
 573 MAXMEM>>20);
 574 pr_warning("Use a HIGHMEM enabled kernel.\n");
575 max_low_pfn = MAXMEM_PFN;
576 max_pfn = MAXMEM_PFN;
577 node_end_pfn[0] = MAXMEM_PFN;
578 } else {
 579 pr_notice("%ldMB memory available.\n",
580 pages_to_mb(node_end_pfn[0]));
581 }
582 for (i = 1; i < MAX_NUMNODES; ++i) {
583 node_start_pfn[i] = 0;
584 node_end_pfn[i] = 0;
585 }
586 high_memory = __va(node_end_pfn[0]);
587#else
588 lowmem_pages = 0;
589 for (i = 0; i < MAX_NUMNODES; ++i) {
590 int pages = node_end_pfn[i] - node_start_pfn[i];
591 lowmem_pages += pages;
592 if (pages)
593 high_memory = pfn_to_kaddr(node_end_pfn[i]);
594 }
 595 pr_notice("%ldMB memory available.\n",
596 pages_to_mb(lowmem_pages));
597#endif
598#endif
599}
600
601/*
602 * On 32-bit machines, we only put bootmem on the low controller,
603 * since PAs > 4GB can't be used in bootmem. In principle one could
604 * imagine, e.g., multiple 1 GB controllers all of which could support
605 * bootmem, but in practice using controllers this small isn't a
606 * particularly interesting scenario, so we just keep it simple and
607 * use only the first controller for bootmem on 32-bit machines.
608 */
609static inline int node_has_bootmem(int nid)
 610{
611#ifdef CONFIG_64BIT
612 return 1;
613#else
614 return nid == 0;
615#endif
616}
 617
618static inline unsigned long alloc_bootmem_pfn(int nid,
619 unsigned long size,
620 unsigned long goal)
621{
622 void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
623 PAGE_SIZE, goal);
624 unsigned long pfn = kaddr_to_pfn(kva);
625 BUG_ON(goal && PFN_PHYS(pfn) != goal);
626 return pfn;
627}
 628
629static void __init setup_bootmem_allocator_node(int i)
630{
631 unsigned long start, end, mapsize, mapstart;
632
633 if (node_has_bootmem(i)) {
634 NODE_DATA(i)->bdata = &bootmem_node_data[i];
635 } else {
636 /* Share controller zero's bdata for now. */
637 NODE_DATA(i)->bdata = &bootmem_node_data[0];
638 return;
639 }
640
641 /* Skip up to after the bss in node 0. */
642 start = (i == 0) ? min_low_pfn : node_start_pfn[i];
643
644 /* Only lowmem, if we're a HIGHMEM build. */
645#ifdef CONFIG_HIGHMEM
646 end = node_lowmem_end_pfn[i];
 647#else
 648 end = node_end_pfn[i];
649#endif
650
651 /* No memory here. */
652 if (end == start)
653 return;
654
655 /* Figure out where the bootmem bitmap is located. */
656 mapsize = bootmem_bootmap_pages(end - start);
657 if (i == 0) {
658 /* Use some space right before the heap on node 0. */
659 mapstart = start;
660 start += mapsize;
661 } else {
662 /* Allocate bitmap on node 0 to avoid page table issues. */
663 mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
664 }
 665
666 /* Initialize a node. */
667 init_bootmem_node(NODE_DATA(i), mapstart, start, end);
 668
669 /* Free all the space back into the allocator. */
670 free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
671
 672#if defined(CONFIG_PCI) && !defined(__tilegx__)
 673 /*
 674 * Throw away any memory aliased by the PCI region.
 675 */
676 if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
677 start = max(pci_reserve_start_pfn, start);
678 end = min(pci_reserve_end_pfn, end);
679 reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
 680 BOOTMEM_EXCLUSIVE);
 681 }
682#endif
683}
 684
685static void __init setup_bootmem_allocator(void)
686{
687 int i;
688 for (i = 0; i < MAX_NUMNODES; ++i)
689 setup_bootmem_allocator_node(i);
 690
691 /* Reserve any memory excluded by "memmap" arguments. */
692 for (i = 0; i < memmap_nr; ++i) {
693 struct memmap_entry *m = &memmap_map[i];
694 reserve_bootmem(m->addr, m->size, 0);
695 }
696
697#ifdef CONFIG_BLK_DEV_INITRD
698 if (initrd_start) {
699 /* Make sure the initrd memory region is not modified. */
700 if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
701 BOOTMEM_EXCLUSIVE)) {
702 pr_crit("The initrd memory region has been polluted. Disabling it.\n");
703 initrd_start = 0;
704 initrd_end = 0;
705 } else {
706 /*
707 * Translate initrd_start & initrd_end from PA to VA for
708 * future access.
709 */
710 initrd_start += PAGE_OFFSET;
711 initrd_end += PAGE_OFFSET;
712 }
713 }
714#endif
715
716#ifdef CONFIG_KEXEC
717 if (crashk_res.start != crashk_res.end)
 718 reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
 719#endif
720}
721
722void *__init alloc_remap(int nid, unsigned long size)
723{
724 int pages = node_end_pfn[nid] - node_start_pfn[nid];
725 void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
726 BUG_ON(size != pages * sizeof(struct page));
727 memset(map, 0, size);
728 return map;
729}
730
731static int __init percpu_size(void)
732{
733 int size = __per_cpu_end - __per_cpu_start;
734 size += PERCPU_MODULE_RESERVE;
735 size += PERCPU_DYNAMIC_EARLY_SIZE;
736 if (size < PCPU_MIN_UNIT_SIZE)
737 size = PCPU_MIN_UNIT_SIZE;
738 size = roundup(size, PAGE_SIZE);
739
740 /* In several places we assume the per-cpu data fits on a huge page. */
741 BUG_ON(kdata_huge && size > HPAGE_SIZE);
742 return size;
743}
744
745static void __init zone_sizes_init(void)
746{
747 unsigned long zones_size[MAX_NR_ZONES] = { 0 };
748 int size = percpu_size();
749 int num_cpus = smp_height * smp_width;
750 const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));
751
752 int i;
753
754 for (i = 0; i < num_cpus; ++i)
755 node_percpu[cpu_to_node(i)] += size;
756
757 for_each_online_node(i) {
758 unsigned long start = node_start_pfn[i];
759 unsigned long end = node_end_pfn[i];
760#ifdef CONFIG_HIGHMEM
761 unsigned long lowmem_end = node_lowmem_end_pfn[i];
762#else
763 unsigned long lowmem_end = end;
764#endif
765 int memmap_size = (end - start) * sizeof(struct page);
766 node_free_pfn[i] = start;
767
768 /*
769 * Set aside pages for per-cpu data and the mem_map array.
770 *
771 * Since the per-cpu data requires special homecaching,
772 * if we are in kdata_huge mode, we put it at the end of
773 * the lowmem region. If we're not in kdata_huge mode,
774 * we take the per-cpu pages from the bottom of the
775 * controller, since that avoids fragmenting a huge page
776 * that users might want. We always take the memmap
777 * from the bottom of the controller, since with
778 * kdata_huge that lets it be under a huge TLB entry.
779 *
780 * If the user has requested isolnodes for a controller,
781 * though, there'll be no lowmem, so we just alloc_bootmem
782 * the memmap. There will be no percpu memory either.
783 */
784 if (i != 0 && cpu_isset(i, isolnodes)) {
785 node_memmap_pfn[i] =
786 alloc_bootmem_pfn(0, memmap_size, 0);
787 BUG_ON(node_percpu[i] != 0);
788 } else if (node_has_bootmem(start)) {
789 unsigned long goal = 0;
790 node_memmap_pfn[i] =
 791 alloc_bootmem_pfn(i, memmap_size, 0);
792 if (kdata_huge)
793 goal = PFN_PHYS(lowmem_end) - node_percpu[i];
794 if (node_percpu[i])
795 node_percpu_pfn[i] =
796 alloc_bootmem_pfn(i, node_percpu[i],
797 goal);
 798 } else {
 799 /* In non-bootmem zones, just reserve some pages. */
800 node_memmap_pfn[i] = node_free_pfn[i];
801 node_free_pfn[i] += PFN_UP(memmap_size);
802 if (!kdata_huge) {
803 node_percpu_pfn[i] = node_free_pfn[i];
804 node_free_pfn[i] += PFN_UP(node_percpu[i]);
805 } else {
806 node_percpu_pfn[i] =
807 lowmem_end - PFN_UP(node_percpu[i]);
808 }
809 }
810
811#ifdef CONFIG_HIGHMEM
812 if (start > lowmem_end) {
813 zones_size[ZONE_NORMAL] = 0;
814 zones_size[ZONE_HIGHMEM] = end - start;
815 } else {
816 zones_size[ZONE_NORMAL] = lowmem_end - start;
817 zones_size[ZONE_HIGHMEM] = end - lowmem_end;
818 }
819#else
820 zones_size[ZONE_NORMAL] = end - start;
821#endif
822
823 if (start < dma_end) {
824 zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
825 dma_end - start);
826 zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
827 } else {
828 zones_size[ZONE_DMA] = 0;
829 }
830
831 /* Take zone metadata from controller 0 if we're isolnode. */
832 if (node_isset(i, isolnodes))
833 NODE_DATA(i)->bdata = &bootmem_node_data[0];
834
835 free_area_init_node(i, zones_size, start, NULL);
 836 printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
837 PFN_UP(node_percpu[i]));
838
839 /* Track the type of memory on each node */
 840 if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
867e359b
CM
841 node_set_state(i, N_NORMAL_MEMORY);
842#ifdef CONFIG_HIGHMEM
843 if (end != start)
844 node_set_state(i, N_HIGH_MEMORY);
845#endif
846
847 node_set_online(i);
848 }
849}
850
851#ifdef CONFIG_NUMA
852
853/* which logical CPUs are on which nodes */
854struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
855EXPORT_SYMBOL(node_2_cpu_mask);
856
857/* which node each logical CPU is on */
858char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
859EXPORT_SYMBOL(cpu_2_node);
860
861/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
862static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus)
863{
864 if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
865 return -1;
866 else
867 return cpu_to_node(cpu);
868}
869
870/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
871static int __init node_neighbors(int node, int cpu,
872 struct cpumask *unbound_cpus)
873{
874 int neighbors = 0;
875 int w = smp_width;
876 int h = smp_height;
877 int x = cpu % w;
878 int y = cpu / w;
879 if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
880 ++neighbors;
881 if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
882 ++neighbors;
883 if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
884 ++neighbors;
885 if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
886 ++neighbors;
887 return neighbors;
888}
889
890static void __init setup_numa_mapping(void)
891{
892 int distance[MAX_NUMNODES][NR_CPUS];
893 HV_Coord coord;
894 int cpu, node, cpus, i, x, y;
895 int num_nodes = num_online_nodes();
896 struct cpumask unbound_cpus;
897 nodemask_t default_nodes;
898
899 cpumask_clear(&unbound_cpus);
900
901 /* Get set of nodes we will use for defaults */
902 nodes_andnot(default_nodes, node_online_map, isolnodes);
903 if (nodes_empty(default_nodes)) {
904 BUG_ON(!node_isset(0, node_online_map));
 905 pr_err("Forcing NUMA node zero available as a default node\n");
906 node_set(0, default_nodes);
907 }
908
909 /* Populate the distance[] array */
910 memset(distance, -1, sizeof(distance));
911 cpu = 0;
912 for (coord.y = 0; coord.y < smp_height; ++coord.y) {
913 for (coord.x = 0; coord.x < smp_width;
914 ++coord.x, ++cpu) {
915 BUG_ON(cpu >= nr_cpu_ids);
916 if (!cpu_possible(cpu)) {
917 cpu_2_node[cpu] = -1;
918 continue;
919 }
920 for_each_node_mask(node, default_nodes) {
921 HV_MemoryControllerInfo info =
922 hv_inquire_memory_controller(
923 coord, node_controller[node]);
924 distance[node][cpu] =
925 ABS(info.coord.x) + ABS(info.coord.y);
926 }
927 cpumask_set_cpu(cpu, &unbound_cpus);
928 }
929 }
930 cpus = cpu;
931
932 /*
933 * Round-robin through the NUMA nodes until all the cpus are
934 * assigned. We could be more clever here (e.g. create four
935 * sorted linked lists on the same set of cpu nodes, and pull
936 * off them in round-robin sequence, removing from all four
937 * lists each time) but given the relatively small numbers
 938 * involved, O(n^2) seems OK for a one-time cost.
939 */
940 node = first_node(default_nodes);
941 while (!cpumask_empty(&unbound_cpus)) {
942 int best_cpu = -1;
943 int best_distance = INT_MAX;
944 for (cpu = 0; cpu < cpus; ++cpu) {
945 if (cpumask_test_cpu(cpu, &unbound_cpus)) {
946 /*
947 * Compute metric, which is how much
948 * closer the cpu is to this memory
949 * controller than the others, shifted
950 * up, and then the number of
951 * neighbors already in the node as an
952 * epsilon adjustment to try to keep
953 * the nodes compact.
954 */
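				/*
				 * Worked example with illustrative numbers:
				 * with two default nodes, a cpu at distance 2
				 * from this node and 5 from the other gets
				 * d = 2*2 - 5 = -1, then -8 after the *8
				 * shift, and -9 with one neighbor already
				 * bound here; a cpu at distances 5 and 2
				 * scores +64, so the nearer cpu is bound first.
				 */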
955 int d = distance[node][cpu] * num_nodes;
956 for_each_node_mask(i, default_nodes) {
957 if (i != node)
958 d -= distance[i][cpu];
959 }
960 d *= 8; /* allow space for epsilon */
961 d -= node_neighbors(node, cpu, &unbound_cpus);
962 if (d < best_distance) {
963 best_cpu = cpu;
964 best_distance = d;
965 }
966 }
967 }
968 BUG_ON(best_cpu < 0);
969 cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
970 cpu_2_node[best_cpu] = node;
971 cpumask_clear_cpu(best_cpu, &unbound_cpus);
972 node = next_node(node, default_nodes);
973 if (node == MAX_NUMNODES)
974 node = first_node(default_nodes);
975 }
976
977 /* Print out node assignments and set defaults for disabled cpus */
978 cpu = 0;
979 for (y = 0; y < smp_height; ++y) {
980 printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
981 for (x = 0; x < smp_width; ++x, ++cpu) {
982 if (cpu_to_node(cpu) < 0) {
 983 pr_cont(" -");
984 cpu_2_node[cpu] = first_node(default_nodes);
985 } else {
 986 pr_cont(" %d", cpu_to_node(cpu));
987 }
988 }
 989 pr_cont("\n");
990 }
991}
992
993static struct cpu cpu_devices[NR_CPUS];
994
995static int __init topology_init(void)
996{
997 int i;
998
999 for_each_online_node(i)
1000 register_one_node(i);
1001
 1002 for (i = 0; i < smp_height * smp_width; ++i)
1003 register_cpu(&cpu_devices[i], i);
1004
1005 return 0;
1006}
1007
1008subsys_initcall(topology_init);
1009
1010#else /* !CONFIG_NUMA */
1011
1012#define setup_numa_mapping() do { } while (0)
1013
1014#endif /* CONFIG_NUMA */
1015
1016/*
1017 * Initialize hugepage support on this cpu. We do this on all cores
1018 * early in boot: before argument parsing for the boot cpu, and after
1019 * argument parsing but before the init functions run on the secondaries.
1020 * So the values we set up here in the hypervisor may be overridden on
1021 * the boot cpu as arguments are parsed.
1022 */
1023static __cpuinit void init_super_pages(void)
1024{
1025#ifdef CONFIG_HUGETLB_SUPER_PAGES
1026 int i;
1027 for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
1028 hv_set_pte_super_shift(i, huge_shift[i]);
1029#endif
1030}
1031
 1032/**
1033 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
1034 * @boot: Is this the boot cpu?
 1035 *
 1036 * Called from setup_arch() on the boot cpu, or online_secondary().
 1037 */
 1038void __cpuinit setup_cpu(int boot)
 1039{
1040 /* The boot cpu sets up its permanent mappings much earlier. */
1041 if (!boot)
1042 store_permanent_mappings();
1043
1044 /* Allow asynchronous TLB interrupts. */
1045#if CHIP_HAS_TILE_DMA()
1046 arch_local_irq_unmask(INT_DMATLB_MISS);
1047 arch_local_irq_unmask(INT_DMATLB_ACCESS);
1048#endif
1049#if CHIP_HAS_SN_PROC()
 1050 arch_local_irq_unmask(INT_SNITLB_MISS);
 1051#endif
 1052#ifdef __tilegx__
 1053 arch_local_irq_unmask(INT_SINGLE_STEP_K);
 1054#endif
1055
1056 /*
1057 * Allow user access to many generic SPRs, like the cycle
1058 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
1059 */
1060 __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);
1061
1062#if CHIP_HAS_SN()
1063 /* Static network is not restricted. */
1064 __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
1065#endif
1066#if CHIP_HAS_SN_PROC()
1067 __insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
1068 __insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
1069#endif
1070
1071 /*
1072 * Set the MPL for interrupt control 0 & 1 to the corresponding
1073 * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT
1074 * SPRs, as well as the interrupt mask.
1075 */
1076 __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
 1077 __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
1078
1079 /* Initialize IRQ support for this cpu. */
1080 setup_irq_regs();
1081
1082#ifdef CONFIG_HARDWALL
1083 /* Reset the network state on this cpu. */
1084 reset_network_state();
1085#endif
1086
1087 init_super_pages();
1088}
1089
1090#ifdef CONFIG_BLK_DEV_INITRD
1091
 1092static int __initdata set_initramfs_file;
 1093static char __initdata initramfs_file[128] = "initramfs";
1094
1095static int __init setup_initramfs_file(char *str)
1096{
1097 if (str == NULL)
1098 return -EINVAL;
1099 strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
1100 set_initramfs_file = 1;
1101
1102 return 0;
1103}
1104early_param("initramfs_file", setup_initramfs_file);
1105
1106/*
1107 * We look for a file called "initramfs" in the hvfs. If there is one, we
1108 * allocate some memory for it and it will be unpacked to the initramfs.
 1109 * If it's compressed, the initrd code will uncompress it first.
1110 */
1111static void __init load_hv_initrd(void)
1112{
1113 HV_FS_StatInfo stat;
1114 int fd, rc;
1115 void *initrd;
1116
1117 /* If initrd has already been set, skip initramfs file in hvfs. */
1118 if (initrd_start)
1119 return;
1120
1121 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
1122 if (fd == HV_ENOENT) {
 1123 if (set_initramfs_file) {
1124 pr_warning("No such hvfs initramfs file '%s'\n",
1125 initramfs_file);
1126 return;
1127 } else {
1128 /* Try old backwards-compatible name. */
1129 fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
1130 if (fd == HV_ENOENT)
1131 return;
1132 }
1133 }
1134 BUG_ON(fd < 0);
1135 stat = hv_fs_fstat(fd);
1136 BUG_ON(stat.size < 0);
1137 if (stat.flags & HV_FS_ISDIR) {
1138 pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
1139 initramfs_file);
1140 return;
1141 }
1142 initrd = alloc_bootmem_pages(stat.size);
1143 rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
1144 if (rc != stat.size) {
 1145 pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
 1146 stat.size, initramfs_file, rc);
 1147 free_initrd_mem((unsigned long) initrd, stat.size);
1148 return;
1149 }
1150 initrd_start = (unsigned long) initrd;
1151 initrd_end = initrd_start + stat.size;
1152}
1153
1154void __init free_initrd_mem(unsigned long begin, unsigned long end)
1155{
 1156 free_bootmem(__pa(begin), end - begin);
1157}
1158
1159static int __init setup_initrd(char *str)
1160{
1161 char *endp;
1162 unsigned long initrd_size;
1163
1164 initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
1165 if (initrd_size == 0 || *endp != '@')
1166 return -EINVAL;
1167
1168 initrd_start = simple_strtoul(endp+1, &endp, 0);
1169 if (initrd_start == 0)
1170 return -EINVAL;
1171
1172 initrd_end = initrd_start + initrd_size;
1173
1174 return 0;
1175}
1176early_param("initrd", setup_initrd);
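Since setup_initrd() uses simple_strtoul() rather than memparse(), no size suffixes are accepted: both fields are plain or 0x-prefixed numbers, size first, then '@', then the physical load address, e.g. an illustrative

    initrd=0x400000@0x80000000

for a 4MB image at PA 0x80000000; setup_bootmem_allocator() later reserves that range and rebases initrd_start/initrd_end to virtual addresses.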
1177
1178#else
1179static inline void load_hv_initrd(void) {}
1180#endif /* CONFIG_BLK_DEV_INITRD */
1181
1182static void __init validate_hv(void)
1183{
1184 /*
1185 * It may already be too late, but let's check our built-in
1186 * configuration against what the hypervisor is providing.
1187 */
1188 unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
1189 int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
1190 int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
1191 HV_ASIDRange asid_range;
1192
1193#ifndef CONFIG_SMP
1194 HV_Topology topology = hv_inquire_topology();
1195 BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
1196 if (topology.width != 1 || topology.height != 1) {
1197 pr_warning("Warning: booting UP kernel on %dx%d grid;"
1198 " will ignore all but first tile.\n",
1199 topology.width, topology.height);
1200 }
1201#endif
1202
1203 if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
1204 early_panic("Hypervisor glue size %ld is too big!\n",
1205 glue_size);
1206 if (hv_page_size != PAGE_SIZE)
1207 early_panic("Hypervisor page size %#x != our %#lx\n",
1208 hv_page_size, PAGE_SIZE);
1209 if (hv_hpage_size != HPAGE_SIZE)
1210 early_panic("Hypervisor huge page size %#x != our %#lx\n",
1211 hv_hpage_size, HPAGE_SIZE);
1212
1213#ifdef CONFIG_SMP
1214 /*
1215 * Some hypervisor APIs take a pointer to a bitmap array
1216 * whose size is at least the number of cpus on the chip.
1217 * We use a struct cpumask for this, so it must be big enough.
1218 */
1219 if ((smp_height * smp_width) > nr_cpu_ids)
1220 early_panic("Hypervisor %d x %d grid too big for Linux"
1221 " NR_CPUS %d\n", smp_height, smp_width,
1222 nr_cpu_ids);
1223#endif
1224
1225 /*
1226 * Check that we're using allowed ASIDs, and initialize the
1227 * various asid variables to their appropriate initial states.
1228 */
1229 asid_range = hv_inquire_asid(0);
1230 __get_cpu_var(current_asid) = min_asid = asid_range.start;
1231 max_asid = asid_range.start + asid_range.size - 1;
1232
1233 if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
1234 sizeof(chip_model)) < 0) {
 1235 pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
1236 strlcpy(chip_model, "unknown", sizeof(chip_model));
1237 }
1238}
1239
1240static void __init validate_va(void)
1241{
1242#ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */
1243 /*
1244 * Similarly, make sure we're only using allowed VAs.
 1245 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
1246 * and 0 .. KERNEL_HIGH_VADDR.
1247 * In addition, make sure we CAN'T use the end of memory, since
1248 * we use the last chunk of each pgd for the pgd_list.
1249 */
 1250 int i, user_kernel_ok = 0;
1251 unsigned long max_va = 0;
1252 unsigned long list_va =
1253 ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
1254
1255 for (i = 0; ; ++i) {
1256 HV_VirtAddrRange range = hv_inquire_virtual(i);
1257 if (range.size == 0)
1258 break;
1259 if (range.start <= MEM_USER_INTRPT &&
 1260 range.start + range.size >= MEM_HV_START)
 1261 user_kernel_ok = 1;
1262 if (range.start == 0)
1263 max_va = range.size;
1264 BUG_ON(range.start + range.size > list_va);
1265 }
1266 if (!user_kernel_ok)
1267 early_panic("Hypervisor not configured for user/kernel VAs\n");
1268 if (max_va == 0)
1269 early_panic("Hypervisor not configured for low VAs\n");
1270 if (max_va < KERNEL_HIGH_VADDR)
1271 early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
1272 max_va, KERNEL_HIGH_VADDR);
1273
1274 /* Kernel PCs must have their high bit set; see intvec.S. */
1275 if ((long)VMALLOC_START >= 0)
1276 early_panic(
1277 "Linux VMALLOC region below the 2GB line (%#lx)!\n"
1278 "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n"
1279 "or smaller VMALLOC_RESERVE.\n",
1280 VMALLOC_START);
1281#endif
1282}
1283
1284/*
1285 * cpu_lotar_map lists all the cpus that are valid for the supervisor
1286 * to cache data on at a page level, i.e. what cpus can be placed in
1287 * the LOTAR field of a PTE. It is equivalent to the set of possible
1288 * cpus plus any other cpus that are willing to share their cache.
1289 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
1290 */
1291struct cpumask __write_once cpu_lotar_map;
1292EXPORT_SYMBOL(cpu_lotar_map);
1293
1294#if CHIP_HAS_CBOX_HOME_MAP()
1295/*
1296 * hash_for_home_map lists all the tiles that hash-for-home data
 1297 * will be cached on.  Note that this may include tiles that are not
1298 * valid for this supervisor to use otherwise (e.g. if a hypervisor
1299 * device is being shared between multiple supervisors).
1300 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
1301 */
1302struct cpumask hash_for_home_map;
1303EXPORT_SYMBOL(hash_for_home_map);
1304#endif
1305
1306/*
1307 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
 1308 * flush on our behalf. It is set to cpu_possible_mask OR'ed with
1309 * hash_for_home_map, and it is what should be passed to
1310 * hv_flush_remote() to flush all caches. Note that if there are
1311 * dedicated hypervisor driver tiles that have authorized use of their
1312 * cache, those tiles will only appear in cpu_lotar_map, NOT in
1313 * cpu_cacheable_map, as they are a special case.
1314 */
1315struct cpumask __write_once cpu_cacheable_map;
1316EXPORT_SYMBOL(cpu_cacheable_map);
1317
1318static __initdata struct cpumask disabled_map;
1319
1320static int __init disabled_cpus(char *str)
1321{
1322 int boot_cpu = smp_processor_id();
1323
1324 if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
1325 return -EINVAL;
1326 if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
 1327 pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
1328 cpumask_clear_cpu(boot_cpu, &disabled_map);
1329 }
1330 return 0;
1331}
1332
1333early_param("disabled_cpus", disabled_cpus);
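The argument is a cpu list in the usual "0-3,5" style; the boot cpu cannot be disabled and is dropped from the set with a warning. An illustrative "disabled_cpus=60-63" keeps the last four tiles of a 64-tile grid out of cpu_possible_mask (the set is OR'ed with the hypervisor-disabled tiles in setup_cpu_maps()).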
1334
 1335void __init print_disabled_cpus(void)
1336{
1337 if (!cpumask_empty(&disabled_map)) {
1338 char buf[100];
1339 cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
 1340 pr_info("CPUs not available for Linux: %s\n", buf);
1341 }
1342}
1343
1344static void __init setup_cpu_maps(void)
1345{
1346 struct cpumask hv_disabled_map, cpu_possible_init;
1347 int boot_cpu = smp_processor_id();
1348 int cpus, i, rc;
1349
1350 /* Learn which cpus are allowed by the hypervisor. */
1351 rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
1352 (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
1353 sizeof(cpu_cacheable_map));
1354 if (rc < 0)
1355 early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
1356 if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
1357 early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);
1358
1359 /* Compute the cpus disabled by the hvconfig file. */
1360 cpumask_complement(&hv_disabled_map, &cpu_possible_init);
1361
1362 /* Include them with the cpus disabled by "disabled_cpus". */
1363 cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);
1364
1365 /*
1366 * Disable every cpu after "setup_max_cpus". But don't mark
1367 * as disabled the cpus that are outside of our initial rectangle,
1368 * since that turns out to be confusing.
1369 */
1370 cpus = 1; /* this cpu */
1371 cpumask_set_cpu(boot_cpu, &disabled_map); /* ignore this cpu */
1372 for (i = 0; cpus < setup_max_cpus; ++i)
1373 if (!cpumask_test_cpu(i, &disabled_map))
1374 ++cpus;
1375 for (; i < smp_height * smp_width; ++i)
1376 cpumask_set_cpu(i, &disabled_map);
1377 cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
1378 for (i = smp_height * smp_width; i < NR_CPUS; ++i)
1379 cpumask_clear_cpu(i, &disabled_map);
1380
1381 /*
1382 * Setup cpu_possible map as every cpu allocated to us, minus
1383 * the results of any "disabled_cpus" settings.
1384 */
1385 cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
1386 init_cpu_possible(&cpu_possible_init);
1387
1388 /* Learn which cpus are valid for LOTAR caching. */
1389 rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
1390 (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
1391 sizeof(cpu_lotar_map));
1392 if (rc < 0) {
 1393 pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
 1394 cpu_lotar_map = *cpu_possible_mask;
1395 }
1396
1397#if CHIP_HAS_CBOX_HOME_MAP()
1398 /* Retrieve set of CPUs used for hash-for-home caching */
1399 rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
1400 (HV_VirtAddr) hash_for_home_map.bits,
1401 sizeof(hash_for_home_map));
1402 if (rc < 0)
1403 early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
 1404 cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
 1405#else
 1406 cpu_cacheable_map = *cpu_possible_mask;
1407#endif
1408}
1409
1410
1411static int __init dataplane(char *str)
1412{
 1413 pr_warning("WARNING: dataplane support disabled in this kernel\n");
1414 return 0;
1415}
1416
1417early_param("dataplane", dataplane);
1418
1419#ifdef CONFIG_CMDLINE_BOOL
1420static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
1421#endif
1422
1423void __init setup_arch(char **cmdline_p)
1424{
1425 int len;
1426
1427#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
1428 len = hv_get_command_line((HV_VirtAddr) boot_command_line,
1429 COMMAND_LINE_SIZE);
1430 if (boot_command_line[0])
1431 pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
1432 boot_command_line);
1433 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
1434#else
1435 char *hv_cmdline;
1436#if defined(CONFIG_CMDLINE_BOOL)
1437 if (builtin_cmdline[0]) {
1438 int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
1439 COMMAND_LINE_SIZE);
1440 if (builtin_len < COMMAND_LINE_SIZE-1)
1441 boot_command_line[builtin_len++] = ' ';
1442 hv_cmdline = &boot_command_line[builtin_len];
1443 len = COMMAND_LINE_SIZE - builtin_len;
1444 } else
1445#endif
1446 {
1447 hv_cmdline = boot_command_line;
1448 len = COMMAND_LINE_SIZE;
1449 }
1450 len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
1451 if (len < 0 || len > COMMAND_LINE_SIZE)
1452 early_panic("hv_get_command_line failed: %d\n", len);
1453#endif
1454
1455 *cmdline_p = boot_command_line;
1456
1457 /* Set disabled_map and setup_max_cpus very early */
1458 parse_early_param();
1459
1460 /* Make sure the kernel is compatible with the hypervisor. */
1461 validate_hv();
1462 validate_va();
1463
1464 setup_cpu_maps();
1465
1466
 1467#if defined(CONFIG_PCI) && !defined(__tilegx__)
1468 /*
1469 * Initialize the PCI structures. This is done before memory
1470 * setup so that we know whether or not a pci_reserve region
1471 * is necessary.
1472 */
1473 if (tile_pci_init() == 0)
1474 pci_reserve_mb = 0;
1475
1476 /* PCI systems reserve a region just below 4GB for mapping iomem. */
1477 pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
1478 pci_reserve_start_pfn = pci_reserve_end_pfn -
1479 (pci_reserve_mb << (20 - PAGE_SHIFT));
1480#endif
1481
1482 init_mm.start_code = (unsigned long) _text;
1483 init_mm.end_code = (unsigned long) _etext;
1484 init_mm.end_data = (unsigned long) _edata;
1485 init_mm.brk = (unsigned long) _end;
1486
1487 setup_memory();
1488 store_permanent_mappings();
1489 setup_bootmem_allocator();
1490
1491 /*
1492 * NOTE: before this point _nobody_ is allowed to allocate
1493 * any memory using the bootmem allocator.
1494 */
1495
1496#ifdef CONFIG_SWIOTLB
1497 swiotlb_init(0);
1498#endif
1499
1500 paging_init();
1501 setup_numa_mapping();
1502 zone_sizes_init();
1503 set_page_homes();
 1504 setup_cpu(1);
1505 setup_clock();
1506 load_hv_initrd();
1507}
1508
1509
1510/*
1511 * Set up per-cpu memory.
1512 */
1513
1514unsigned long __per_cpu_offset[NR_CPUS] __write_once;
1515EXPORT_SYMBOL(__per_cpu_offset);
1516
1517static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
1518static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };
1519
1520/*
1521 * As the percpu code allocates pages, we return the pages from the
1522 * end of the node for the specified cpu.
1523 */
1524static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
1525{
1526 int nid = cpu_to_node(cpu);
1527 unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];
1528
1529 BUG_ON(size % PAGE_SIZE != 0);
1530 pfn_offset[nid] += size / PAGE_SIZE;
1531 BUG_ON(node_percpu[nid] < size);
1532 node_percpu[nid] -= size;
1533 if (percpu_pfn[cpu] == 0)
1534 percpu_pfn[cpu] = pfn;
1535 return pfn_to_kaddr(pfn);
1536}
1537
1538/*
1539 * Pages reserved for percpu memory are not freeable, and in any case we are
1540 * on a short path to panic() in setup_per_cpu_area() at this point anyway.
1541 */
1542static void __init pcpu_fc_free(void *ptr, size_t size)
1543{
1544}
1545
1546/*
1547 * Set up vmalloc page tables using bootmem for the percpu code.
1548 */
1549static void __init pcpu_fc_populate_pte(unsigned long addr)
1550{
1551 pgd_t *pgd;
1552 pud_t *pud;
1553 pmd_t *pmd;
1554 pte_t *pte;
1555
1556 BUG_ON(pgd_addr_invalid(addr));
1557 if (addr < VMALLOC_START || addr >= VMALLOC_END)
1558 panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
1559 " try increasing CONFIG_VMALLOC_RESERVE\n",
1560 addr, VMALLOC_START, VMALLOC_END);
1561
1562 pgd = swapper_pg_dir + pgd_index(addr);
1563 pud = pud_offset(pgd, addr);
1564 BUG_ON(!pud_present(*pud));
1565 pmd = pmd_offset(pud, addr);
1566 if (pmd_present(*pmd)) {
1567 BUG_ON(pmd_huge_page(*pmd));
1568 } else {
1569 pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
1570 HV_PAGE_TABLE_ALIGN, 0);
1571 pmd_populate_kernel(&init_mm, pmd, pte);
1572 }
1573}
1574
1575void __init setup_per_cpu_areas(void)
1576{
1577 struct page *pg;
1578 unsigned long delta, pfn, lowmem_va;
1579 unsigned long size = percpu_size();
1580 char *ptr;
1581 int rc, cpu, i;
1582
1583 rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
1584 pcpu_fc_free, pcpu_fc_populate_pte);
1585 if (rc < 0)
1586 panic("Cannot initialize percpu area (err=%d)", rc);
1587
1588 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1589 for_each_possible_cpu(cpu) {
1590 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1591
1592 /* finv the copy out of cache so we can change homecache */
1593 ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
1594 __finv_buffer(ptr, size);
1595 pfn = percpu_pfn[cpu];
1596
1597 /* Rewrite the page tables to cache on that cpu */
1598 pg = pfn_to_page(pfn);
1599 for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
1600
1601 /* Update the vmalloc mapping and page home. */
1602 unsigned long addr = (unsigned long)ptr + i;
1603 pte_t *ptep = virt_to_pte(NULL, addr);
1604 pte_t pte = *ptep;
1605 BUG_ON(pfn != pte_pfn(pte));
1606 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
1607 pte = set_remote_cache_cpu(pte, cpu);
 1608 set_pte_at(&init_mm, addr, ptep, pte);
1609
1610 /* Update the lowmem mapping for consistency. */
1611 lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
1612 ptep = virt_to_pte(NULL, lowmem_va);
1613 if (pte_huge(*ptep)) {
1614 printk(KERN_DEBUG "early shatter of huge page"
1615 " at %#lx\n", lowmem_va);
1616 shatter_pmd((pmd_t *)ptep);
1617 ptep = virt_to_pte(NULL, lowmem_va);
1618 BUG_ON(pte_huge(*ptep));
1619 }
1620 BUG_ON(pfn != pte_pfn(*ptep));
 1621 set_pte_at(&init_mm, lowmem_va, ptep, pte);
1622 }
1623 }
1624
1625 /* Set our thread pointer appropriately. */
1626 set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);
1627
1628 /* Make sure the finv's have completed. */
1629 mb_incoherent();
1630
1631 /* Flush the TLB so we reference it properly from here on out. */
1632 local_flush_tlb_all();
1633}
1634
1635static struct resource data_resource = {
1636 .name = "Kernel data",
1637 .start = 0,
1638 .end = 0,
1639 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
1640};
1641
1642static struct resource code_resource = {
1643 .name = "Kernel code",
1644 .start = 0,
1645 .end = 0,
1646 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
1647};
1648
1649/*
 1650 * On Pro, we reserve all resources above 4GB so that PCI won't try to put
 1651 * mappings above 4GB.
 1652 */
 1653#if defined(CONFIG_PCI) && !defined(__tilegx__)
1654static struct resource* __init
1655insert_non_bus_resource(void)
1656{
1657 struct resource *res =
1658 kzalloc(sizeof(struct resource), GFP_ATOMIC);
1659 res->name = "Non-Bus Physical Address Space";
1660 res->start = (1ULL << 32);
1661 res->end = -1LL;
1662 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1663 if (insert_resource(&iomem_resource, res)) {
1664 kfree(res);
1665 return NULL;
1666 }
1667 return res;
1668}
1669#endif
1670
1671static struct resource* __init
 1672insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
1673{
1674 struct resource *res =
1675 kzalloc(sizeof(struct resource), GFP_ATOMIC);
 1676 res->name = reserved ? "Reserved" : "System RAM";
1677 res->start = start_pfn << PAGE_SHIFT;
1678 res->end = (end_pfn << PAGE_SHIFT) - 1;
1679 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1680 if (insert_resource(&iomem_resource, res)) {
1681 kfree(res);
1682 return NULL;
1683 }
1684 return res;
1685}
1686
1687/*
1688 * Request address space for all standard resources
1689 *
1690 * If the system includes PCI root complex drivers, we need to create
1691 * a window just below 4GB where PCI BARs can be mapped.
1692 */
1693static int __init request_standard_resources(void)
1694{
1695 int i;
 1696 enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
 1697
 1698#if defined(CONFIG_PCI) && !defined(__tilegx__)
1699 insert_non_bus_resource();
1700#endif
1701
1702 for_each_online_node(i) {
1703 u64 start_pfn = node_start_pfn[i];
1704 u64 end_pfn = node_end_pfn[i];
1705
 1706#if defined(CONFIG_PCI) && !defined(__tilegx__)
1707 if (start_pfn <= pci_reserve_start_pfn &&
1708 end_pfn > pci_reserve_start_pfn) {
1709 if (end_pfn > pci_reserve_end_pfn)
1710 insert_ram_resource(pci_reserve_end_pfn,
 1711 end_pfn, 0);
1712 end_pfn = pci_reserve_start_pfn;
1713 }
1714#endif
 1715 insert_ram_resource(start_pfn, end_pfn, 0);
1716 }
1717
1718 code_resource.start = __pa(_text - CODE_DELTA);
1719 code_resource.end = __pa(_etext - CODE_DELTA)-1;
1720 data_resource.start = __pa(_sdata);
1721 data_resource.end = __pa(_end)-1;
1722
1723 insert_resource(&iomem_resource, &code_resource);
1724 insert_resource(&iomem_resource, &data_resource);
1725
1726 /* Mark any "memmap" regions busy for the resource manager. */
1727 for (i = 0; i < memmap_nr; ++i) {
1728 struct memmap_entry *m = &memmap_map[i];
1729 insert_ram_resource(PFN_DOWN(m->addr),
1730 PFN_UP(m->addr + m->size - 1), 1);
1731 }
1732
1733#ifdef CONFIG_KEXEC
1734 insert_resource(&iomem_resource, &crashk_res);
1735#endif
1736
1737 return 0;
1738}
1739
1740subsys_initcall(request_standard_resources);