2 * Virtual Memory Map support
4 * (C) 2007 sgi. Christoph Lameter <clameter@sgi.com>.
6 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
7 * virt_to_page, page_address() to be implemented as a base offset
8 * calculation without memory access.
10 * However, virtual mappings need a page table and TLBs. Many Linux
11 * architectures already map their physical space using 1-1 mappings
12 * via TLBs. For those arches the virtual memory map is essentially
13 * for free if we use the same page size as the 1-1 mappings. In that
14 * case the overhead consists of a few additional pages that are
15 * allocated to create a view of memory for vmemmap.
17 * Special Kconfig settings:
19 * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
21 * The architecture has its own functions to populate the memory
22 * map and provides a vmemmap_populate function.
24 * CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
26 * The architecture provides functions to populate the pmd level
27 * of the vmemmap mappings. Allowing mappings using large pages
30 * If neither is set then PAGE_SIZE mappings are generated which
31 * require one PTE/TLB per PAGE_SIZE chunk of the virtual memory map.
34 #include <linux/mmzone.h>
35 #include <linux/bootmem.h>
36 #include <linux/highmem.h>
37 #include <linux/module.h>
38 #include <linux/spinlock.h>
39 #include <linux/vmalloc.h>
41 #include <asm/pgalloc.h>
42 #include <asm/pgtable.h>
45 * Allocate a block of memory to be used to back the virtual memory map
46 * or to back the page tables that are used to create the mapping.
47 * Uses the main allocators if they are available, else bootmem.
49 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
51 /* If the main allocator is up use that, fallback to bootmem. */
52 if (slab_is_available()) {
53 struct page *page = alloc_pages_node(node,
54 GFP_KERNEL | __GFP_ZERO, get_order(size));
56 return page_address(page);
59 return __alloc_bootmem_node(NODE_DATA(node), size, size,
60 __pa(MAX_DMA_ADDRESS));
63 #ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP
64 void __meminit vmemmap_verify(pte_t *pte, int node,
65 unsigned long start, unsigned long end)
67 unsigned long pfn = pte_pfn(*pte);
68 int actual_node = early_pfn_to_nid(pfn);
70 if (actual_node != node)
71 printk(KERN_WARNING "[%lx-%lx] potential offnode "
72 "page_structs\n", start, end - 1);
75 #ifndef CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD
76 static int __meminit vmemmap_populate_pte(pmd_t *pmd, unsigned long addr,
77 unsigned long end, int node)
81 for (pte = pte_offset_kernel(pmd, addr); addr < end;
82 pte++, addr += PAGE_SIZE)
85 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
89 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
93 vmemmap_verify(pte, node, addr + PAGE_SIZE, end);
98 int __meminit vmemmap_populate_pmd(pud_t *pud, unsigned long addr,
99 unsigned long end, int node)
105 for (pmd = pmd_offset(pud, addr); addr < end && !error;
106 pmd++, addr = next) {
107 if (pmd_none(*pmd)) {
108 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
112 pmd_populate_kernel(&init_mm, pmd, p);
114 vmemmap_verify((pte_t *)pmd, node,
115 pmd_addr_end(addr, end), end);
116 next = pmd_addr_end(addr, end);
117 error = vmemmap_populate_pte(pmd, addr, next, node);
121 #endif /* CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP_PMD */
123 static int __meminit vmemmap_populate_pud(pgd_t *pgd, unsigned long addr,
124 unsigned long end, int node)
130 for (pud = pud_offset(pgd, addr); addr < end && !error;
131 pud++, addr = next) {
132 if (pud_none(*pud)) {
133 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
137 pud_populate(&init_mm, pud, p);
139 next = pud_addr_end(addr, end);
140 error = vmemmap_populate_pmd(pud, addr, next, node);
145 int __meminit vmemmap_populate(struct page *start_page,
146 unsigned long nr, int node)
149 unsigned long addr = (unsigned long)start_page;
150 unsigned long end = (unsigned long)(start_page + nr);
154 printk(KERN_DEBUG "[%lx-%lx] Virtual memory section"
155 " (%ld pages) node %d\n", addr, end - 1, nr, node);
157 for (pgd = pgd_offset_k(addr); addr < end && !error;
158 pgd++, addr = next) {
159 if (pgd_none(*pgd)) {
160 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
164 pgd_populate(&init_mm, pgd, p);
166 next = pgd_addr_end(addr,end);
167 error = vmemmap_populate_pud(pgd, addr, next, node);
171 #endif /* !CONFIG_ARCH_POPULATES_SPARSEMEM_VMEMMAP */
173 struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
175 struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
176 int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);