/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);
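
/*
 * Each memory range added via vmem_add_mapping() is tracked by one of these
 * entries on the mem_segs list; the list is protected by vmem_mutex.
 */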
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
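
/*
 * Allocation helpers for the page table structures below: before the slab
 * allocator is available (early boot) memory is taken directly from
 * memblock, afterwards the regular allocators are used.
 */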
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_alloc(size, size);
}
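
/*
 * Region third and segment tables are four pages each on s390, hence the
 * order-2 allocations and the PAGE_SIZE * 4 clear_table() calls below.
 */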
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}
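
/*
 * A page table on s390 holds 256 entries (2KB); all entries are
 * initialized as invalid.
 */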
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
		/* map a 2G frame if EDAT2 is available and the range allows it */
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		/* map a 1M frame if EDAT1 is available and the range allows it */
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used. Otherwise we would have also page tables
			 * since vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}
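
/*
 * vmemmap_free() is intentionally a no-op: memory backing the virtual
 * mem_map is not reclaimed when sections are removed.
 */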
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;
	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}
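
/*
 * Unmap a segment and drop it from the segment list. The caller must hold
 * vmem_mutex.
 */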
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
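
/*
 * Remove a previously added mapping. start and size must exactly match a
 * segment that was registered with vmem_add_mapping().
 */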
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}
	if (seg->start != start || seg->size != size)
		goto out;
	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
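
/*
 * Register a new memory segment and create the corresponding 1:1 page table
 * entries. On failure the segment and any partially created mapping are
 * removed again.
 */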
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;
	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;
	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;
out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
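
/*
 * vmem_add_mapping() and vmem_remove_mapping() form the external interface
 * of this file; callers (e.g. memory hotplug or the DCSS segment code) are
 * expected to pair them, roughly:
 *
 *	if (vmem_add_mapping(start, size))
 *		goto fail;
 *	...
 *	vmem_remove_mapping(start, size);
 */
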
/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long size = _eshared - _stext;
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
	pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);