/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/a.out.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
DEFINE_PER_CPU(long, __pgtable_quicklist_size);

extern void ia64_tlb_init (void);

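/* the DMA zone is limited to the first 4GB of physical memory: */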
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

#define MIN_PGT_PAGES			25UL
#define MAX_PGT_FREES_PER_PASS		16L
#define PGT_FRACTION_OF_NODE_MEM	16

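/*
 * Sizing example (illustrative only): with 16KB pages, a node with 1GB
 * free (65536 pages) lets max_pgt_pages() cache up to 65536/16 = 4096
 * page-table pages, and check_pgt_cache() trims any excess at most
 * MAX_PGT_FREES_PER_PASS = 16 pages per preemptible pass.
 */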
static inline long
max_pgt_pages(void)
{
	u64 node_free_pages, max_pgt_pages;

#ifndef CONFIG_NUMA
	node_free_pages = nr_free_pages();
#else
	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
#endif
	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
	return max_pgt_pages;
}

static inline long
min_pages_to_free(void)
{
	long pages_to_free;

	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
	return pages_to_free;
}

void
check_pgt_cache(void)
{
	long pages_to_free;

	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
		return;

	preempt_disable();
	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
		while (pages_to_free--) {
			free_page((unsigned long)pgtable_quicklist_alloc());
		}
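		/* give preemption a brief window between passes to bound latency */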
		preempt_enable();
		preempt_disable();
	}
	preempt_enable();
}

void
lazy_mmu_prot_update (pte_t pte)
{
	unsigned long addr;
	struct page *page;
	unsigned long order;

	if (!pte_exec(pte))
		return;				/* not an executable page... */

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	if (PageCompound(page)) {
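		/* the page allocator stores a compound page's order in page[1].lru.prev */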
		order = (unsigned long) (page[1].lru.prev);
		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
	}
	else
		flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = STACK_TOP - stack_size;
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes.  It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, the initrd occupies 7000-9000 and the kernel page
	 * starting at 8000 is the only page free of kernel data.  Aligning
	 * initrd_start up (here: to 8000) and initrd_end down (here: also to
	 * 8000) guarantees that a page shared with the kernel is never freed;
	 * the cost is that a partially covered page such as 8000 stays
	 * allocated.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/* Pin mapping for percpu area into TLB */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

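	/*
	 * Worked example (illustrative only): with 16KB pages (PAGE_SHIFT = 14)
	 * and 8-byte ptes (pte_bits = 3), mapped_space_bits = 3*11 + 14 = 47,
	 * i.e. 128TB of mappable space per region.  With the architectural
	 * minimum of impl_va_bits = 51, vmlpt_bits = 51 - 14 + 3 = 40: a 1TB
	 * virtually mapped linear page table occupying the top 2^40 bytes of
	 * each region.
	 */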
	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the log2 of the number of ptes we
	 * need, which must fit into the "vmlpt_bits - pte_bits" of index the
	 * linear table provides.  The second half of the test makes sure that
	 * our mapped space doesn't overlap the unimplemented hole in the
	 * middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
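	/*
	 * Continuing the illustrative numbers above (vmlpt_bits = 40), this
	 * programs PTA = (2^61 - 2^40) | (40 << 2) | VHPT_ENABLE_BIT: short
	 * format (bit 8 clear), a 2^40-byte table, walker enabled.
	 */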
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
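/*
 * Find the first virtually mapped vmem_map page at or after node-relative
 * pfn offset i: walk the kernel page table, skipping unmapped holes one
 * PGD/PUD/PMD/PTE entry at a time, and return the node-relative pfn whose
 * struct page is the first one backed by a mapped page.
 */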
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

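/*
 * Make sure the vmem_map entries covering the physical range [start, end)
 * are themselves backed by real memory: allocate any missing page-table
 * levels from node-local bootmem and map a fresh page wherever a pte is
 * still empty.
 */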
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

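/*
 * A pfn is valid iff its struct page is readable without faulting: probe
 * the first byte with __get_user() and, if the descriptor straddles a
 * vmem_map page boundary, the last byte as well.
 */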
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
	int nid = paddr_to_nid(__pa(start));

	if (nid < 0)
		nid = 0;
#ifdef CONFIG_KEXEC
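	/* trim start/end if they fall inside the crash kernel's reserved region */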
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
			__pa(end) >> PAGE_SHIFT);
	return 0;
}

static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

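	/* each page-table level is expected to occupy exactly one page: */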
	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) _etext - (unsigned long) _stext;
	datasize = (unsigned long) _edata - (unsigned long) _etext;
	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(zone, start_pfn, nr_pages);

	if (ret)
		printk(KERN_ERR "%s: __add_pages() failed, ret=%d\n",
		       __FUNCTION__, ret);

	return ret;
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif