#include <asm/setup.h>
#include <asm/tlbflush.h>
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
-
-static struct page *vmem_map;
static DEFINE_MUTEX(vmem_mutex);
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
-void memmap_init(unsigned long size, int nid, unsigned long zone,
- unsigned long start_pfn)
+void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn)
{
struct page *start, *end;
struct page *map_start, *map_end;
}
}
-static inline void *vmem_alloc_pages(unsigned int order)
+static void __ref *vmem_alloc_pages(unsigned int order)
{
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
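
The helper above is the patch's two-phase allocator: before the slab
allocator is up, page tables can only come from the bootmem allocator,
which lives in __init sections; afterwards __get_free_pages() is used.
The __ref annotation marks the reference into init code as intentional
so modpost does not warn about a section mismatch. A minimal userspace
sketch of the same pattern (all names below are made up for
illustration):

	#include <stdbool.h>
	#include <stdlib.h>

	static bool slab_up;			/* stands in for slab_is_available() */
	static char early_pool[1 << 20];	/* stands in for the bootmem region */
	static size_t early_off;

	/* One entry point, two backing allocators, chosen by boot phase. */
	static void *phase_alloc(size_t size)
	{
		if (slab_up)
			return malloc(size);
		if (early_off + size > sizeof(early_pool))
			return NULL;
		early_off += size;
		return early_pool + early_off - size;
	}

	int main(void)
	{
		void *a = phase_alloc(64);	/* served from early_pool */
		slab_up = true;
		void *b = phase_alloc(64);	/* served by malloc() */
		free(b);
		return !(a && b);
	}
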
+#define vmem_pud_alloc() ({ BUG(); ((pud_t *) NULL); })
+
static inline pmd_t *vmem_pmd_alloc(void)
{
- pmd_t *pmd;
- int i;
+ pmd_t *pmd = NULL;
- pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
+#ifdef CONFIG_64BIT
+ pmd = vmem_alloc_pages(2);
if (!pmd)
return NULL;
- for (i = 0; i < PTRS_PER_PMD; i++)
- pmd_clear_kernel(pmd + i);
+ clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
+#endif
return pmd;
}
-static inline pte_t *vmem_pte_alloc(void)
+static pte_t __init_refok *vmem_pte_alloc(void)
{
pte_t *pte;
- pte_t empty_pte;
- int i;
- pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
+ if (slab_is_available())
+ pte = (pte_t *) page_table_alloc(&init_mm);
+ else
+ pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
return NULL;
- pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
- for (i = 0; i < PTRS_PER_PTE; i++)
- pte[i] = empty_pte;
+ clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+ PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
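
Both allocation paths above return uninitialized memory, and an empty
s390 table entry is a non-zero invalid pattern (_SEGMENT_ENTRY_EMPTY,
_PAGE_TYPE_EMPTY), so plain zeroing is not enough. The old per-entry
loops and the new clear_table() calls both mean "fill the table with
the invalid value". A portable sketch of that helper (the real s390
version is written with optimized string instructions, and the value
used here is illustrative, not the real bit pattern):

	#include <stddef.h>
	#include <stdio.h>

	/* Fill an n-byte table with the "invalid entry" value val. */
	static void clear_table(unsigned long *table, unsigned long val, size_t n)
	{
		for (size_t i = 0; i < n / sizeof(unsigned long); i++)
			table[i] = val;
	}

	int main(void)
	{
		unsigned long pte[256];	/* stand-in for PTRS_PER_PTE entries */
		clear_table(pte, 0x400UL, sizeof(pte));	/* 0x400: illustrative */
		printf("%#lx\n", pte[255]);
		return 0;
	}
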
static int vmem_add_range(unsigned long start, unsigned long size)
{
unsigned long address;
pgd_t *pg_dir;
+ pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
for (address = start; address < start + size; address += PAGE_SIZE) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
+ pu_dir = vmem_pud_alloc();
+ if (!pu_dir)
+ goto out;
+ pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+ }
+
+ pu_dir = pud_offset(pg_dir, address);
+ if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
- pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
+ pud_populate_kernel(&init_mm, pu_dir, pm_dir);
}
- pm_dir = pmd_offset(pg_dir, address);
+ pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}
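
This hunk shows the core pattern of the four-level conversion: at each
step the walk checks whether the next-lower table exists (pgd_none(),
pud_none(), pmd_none()), allocates and populates it if not, then
descends with the matching *_offset() helper. A toy userspace model of
that allocate-if-absent descent (the 9-bit index split below is made
up; s390's real table layout differs):

	#include <stdlib.h>

	#define ENTRIES 512

	struct table { struct table *entry[ENTRIES]; };

	/* Walk pgd->pud->pmd->pte for addr, allocating missing tables on
	 * the way down; returns the leaf slot, or NULL if out of memory. */
	static struct table **leaf_slot(struct table *pgd, unsigned long addr)
	{
		unsigned int idx[4] = {
			(addr >> 39) & (ENTRIES - 1), (addr >> 30) & (ENTRIES - 1),
			(addr >> 21) & (ENTRIES - 1), (addr >> 12) & (ENTRIES - 1),
		};
		struct table *t = pgd;
		int level;

		for (level = 0; level < 3; level++) {
			if (!t->entry[idx[level]]) {	/* *_none() */
				t->entry[idx[level]] = calloc(1, sizeof(*t));
				if (!t->entry[idx[level]])
					return NULL;	/* the "goto out" path */
			}
			t = t->entry[idx[level]];	/* *_offset() */
		}
		return &t->entry[idx[3]];
	}

	int main(void)
	{
		struct table pgd = { { 0 } };

		return leaf_slot(&pgd, 0x123456789000UL) == NULL;
	}
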
static void vmem_remove_range(unsigned long start, unsigned long size)
{
unsigned long address;
pgd_t *pg_dir;
+ pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
pte_val(pte) = _PAGE_TYPE_EMPTY;
for (address = start; address < start + size; address += PAGE_SIZE) {
pg_dir = pgd_offset_k(address);
- if (pgd_none(*pg_dir))
+ pu_dir = pud_offset(pg_dir, address);
+ if (pud_none(*pu_dir))
continue;
- pm_dir = pmd_offset(pg_dir, address);
+ pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir))
continue;
pt_dir = pte_offset_kernel(pm_dir, address);
static int vmem_add_mem_map(unsigned long start, unsigned long size)
{
	unsigned long address, start_addr, end_addr;
struct page *map_start, *map_end;
pgd_t *pg_dir;
+ pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
pte_t pte;
int ret = -ENOMEM;
- map_start = vmem_map + PFN_DOWN(start);
- map_end = vmem_map + PFN_DOWN(start + size);
+ map_start = VMEM_MAP + PFN_DOWN(start);
+ map_end = VMEM_MAP + PFN_DOWN(start + size);
start_addr = (unsigned long) map_start & PAGE_MASK;
end_addr = PFN_ALIGN((unsigned long) map_end);
for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
+ pu_dir = vmem_pud_alloc();
+ if (!pu_dir)
+ goto out;
+ pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+ }
+
+ pu_dir = pud_offset(pg_dir, address);
+ if (pud_none(*pu_dir)) {
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
goto out;
- pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
+ pud_populate_kernel(&init_mm, pu_dir, pm_dir);
}
- pm_dir = pmd_offset(pg_dir, address);
+ pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}
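
vmem_add_mem_map() above no longer computes the memory map's location
at boot time; the struct page array now lives at the fixed address
VMEM_MAP, and the function derives which of its virtual pages back a
given physical range, rounding the ends outward to page boundaries
before mapping them. The arithmetic in isolation (VMEM_MAP's value and
the struct layout below are made up):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
	#define PFN_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	struct page { unsigned long flags, pad[7]; };	/* stand-in layout */

	#define VMEM_MAP ((struct page *) 0x20000000000UL)	/* illustrative */

	int main(void)
	{
		unsigned long start = 0x10000000UL, size = 0x08000000UL;
		struct page *map_start = VMEM_MAP + PFN_DOWN(start);
		struct page *map_end = VMEM_MAP + PFN_DOWN(start + size);
		unsigned long start_addr = (unsigned long) map_start & PAGE_MASK;
		unsigned long end_addr = PFN_ALIGN((unsigned long) map_end);

		printf("struct pages to map: [%#lx, %#lx)\n", start_addr, end_addr);
		return 0;
	}
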
static int vmem_add_mem(unsigned long start, unsigned long size)
{
int ret;
- ret = vmem_add_range(start, size);
+ ret = vmem_add_mem_map(start, size);
if (ret)
return ret;
- return vmem_add_mem_map(start, size);
+ return vmem_add_range(start, size);
}
/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
struct memory_segment *tmp;
- if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+ if (seg->start + seg->size >= VMEM_MAX_PHYS ||
seg->start + seg->size < seg->start)
return -ERANGE;
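
The second clause of the range check above is an unsigned-overflow
guard: if seg->start + seg->size wraps past the top of the address
space, the sum comes out smaller than seg->start. The same validation
in isolation (the limit is a placeholder, not the real VMEM_MAX_PHYS):

	#include <assert.h>
	#include <stdbool.h>

	#define MAX_PHYS (1UL << 42)	/* placeholder limit */

	static bool range_ok(unsigned long start, unsigned long size)
	{
		unsigned long end = start + size;

		/* end < start can only happen if the addition wrapped */
		return end >= start && end < MAX_PHYS;
	}

	int main(void)
	{
		assert(range_ok(0x1000, 0x1000));
		assert(!range_ok(~0UL - 10, 100));	/* wraps: rejected */
		assert(!range_ok(MAX_PHYS, 1));		/* too high: rejected */
		return 0;
	}
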
/*
* map whole physical memory to virtual memory (identity mapping)
+ * we reserve enough space in the vmalloc area for the vmemmap so that
+ * additional memory segments can be hotplugged later.
*/
void __init vmem_map_init(void)
{
- unsigned long map_size;
int i;
- map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
- vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
- vmem_map = (struct page *) vmalloc_end;
- NODE_DATA(0)->node_mem_map = vmem_map;
-
+ INIT_LIST_HEAD(&init_mm.context.crst_list);
+ INIT_LIST_HEAD(&init_mm.context.pgtable_list);
+ init_mm.context.noexec = 0;
+ NODE_DATA(0)->node_mem_map = VMEM_MAP;
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}
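
The list heads added to vmem_map_init() must be set up before the loop
above calls vmem_add_mem(), because vmem_pte_alloc() (earlier in this
patch) obtains page tables through page_table_alloc(&init_mm), which
links each new table into these init_mm.context lists. An uninitialized
list head makes the first insertion dereference garbage, as this
miniature of the kernel's circular list shows:

	#include <assert.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h)
	{
		h->next = h->prev = h;	/* empty list points at itself */
	}

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;	/* crashes if head is uninitialized */
		head->next = new;
	}

	int main(void)
	{
		struct list_head pgtable_list, entry;

		INIT_LIST_HEAD(&pgtable_list);
		list_add(&entry, &pgtable_list);
		assert(pgtable_list.next == &entry);
		assert(entry.next == &pgtable_list);
		return 0;
	}
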
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;
mutex_lock(&vmem_mutex);
- for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+ for (i = 0; i < MEMORY_CHUNKS; i++) {
if (!memory_chunk[i].size)
continue;
seg = kzalloc(sizeof(*seg), GFP_KERNEL);