Merge branches 'amba', 'fixes', 'misc', 'mmci', 'unstable/omap-dma' and 'unstable...
[linux-2.6-block.git] arch/arm/mm/mmu.c
index 580ef2de82d728f8ecfde5f5f3b208a2e5525b06..b68c6b22e1c80f263d46555064b1c1c13f48586c 100644
@@ -231,12 +232,16 @@ __setup("noalign", noalign_setup);
 #endif /* ifdef CONFIG_CPU_CP15 / else */
 
 #define PROT_PTE_DEVICE                L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE     PROT_PTE_DEVICE
 #define PROT_SECT_DEVICE       PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
        [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
                                  L_PTE_SHARED,
+               .prot_pte_s2    = s2_policy(PROT_PTE_S2_DEVICE) |
+                                 s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+                                 L_PTE_SHARED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_S,
                .domain         = DOMAIN_IO,
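
Note: s2_policy() is defined near the top of mmu.c, outside this hunk. A minimal sketch of that convention, recalled from the surrounding file rather than from this diff: stage-2 attribute bits only exist in the LPAE page-table format, so on non-LPAE builds the macro simply discards its argument:

        #ifdef CONFIG_ARM_LPAE
        #define s2_policy(policy)	policy
        #else
        #define s2_policy(policy)	0
        #endif

On non-LPAE builds prot_pte_s2 therefore collapses to just L_PTE_SHARED, and it is never consulted there anyway, since KVM's stage-2 tables require LPAE.
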
@@ -287,36 +292,43 @@ static struct mem_type mem_types[] = {
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
-       [MT_MEMORY] = {
+       [MT_MEMORY_RWX] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
+       [MT_MEMORY_RW] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                            L_PTE_XN,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .domain    = DOMAIN_KERNEL,
+       },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
-       [MT_MEMORY_NONCACHED] = {
+       [MT_MEMORY_RWX_NONCACHED] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_MT_BUFFERABLE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
-       [MT_MEMORY_DTCM] = {
+       [MT_MEMORY_RW_DTCM] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_XN,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
-       [MT_MEMORY_ITCM] = {
+       [MT_MEMORY_RWX_ITCM] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_KERNEL,
        },
-       [MT_MEMORY_SO] = {
+       [MT_MEMORY_RW_SO] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_MT_UNCACHED | L_PTE_XN,
                .prot_l1   = PMD_TYPE_TABLE,
@@ -325,7 +337,8 @@ static struct mem_type mem_types[] = {
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_DMA_READY] = {
-               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_XN,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_KERNEL,
        },
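
The old catch-all MT_MEMORY is split by executability: MT_MEMORY_RWX keeps the previous semantics, while MT_MEMORY_RW (and now MT_MEMORY_DMA_READY) gains L_PTE_XN so data-only mappings can never be executed. A hedged sketch of how a caller picks a type, modelled on the map_lowmem() hunk further down ('start' and 'end' are hypothetical physical bounds):

        struct map_desc map;

        map.pfn     = __phys_to_pfn(start);
        map.virtual = __phys_to_virt(start);
        map.length  = end - start;
        map.type    = MT_MEMORY_RW;	/* data only: mapped non-executable */

        create_mapping(&map);
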
@@ -337,6 +350,44 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+#define PTE_SET_FN(_name, pteop) \
+static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
+                       void *data) \
+{ \
+       pte_t pte = pteop(*ptep); \
+\
+       set_pte_ext(ptep, pte, 0); \
+       return 0; \
+}
+
+#define SET_MEMORY_FN(_name, callback) \
+int set_memory_##_name(unsigned long addr, int numpages) \
+{ \
+       unsigned long start = addr; \
+       unsigned long size = PAGE_SIZE * numpages; \
+       unsigned long end = start + size; \
+\
+       if (start < MODULES_VADDR || start >= MODULES_END) \
+               return -EINVAL; \
+\
+       if (end < MODULES_VADDR || end > MODULES_END) \
+               return -EINVAL; \
+\
+       apply_to_page_range(&init_mm, start, size, callback, NULL); \
+       flush_tlb_kernel_range(start, end); \
+       return 0; \
+}
+
+PTE_SET_FN(ro, pte_wrprotect)
+PTE_SET_FN(rw, pte_mkwrite)
+PTE_SET_FN(x, pte_mkexec)
+PTE_SET_FN(nx, pte_mknexec)
+
+SET_MEMORY_FN(ro, pte_set_ro)
+SET_MEMORY_FN(rw, pte_set_rw)
+SET_MEMORY_FN(x, pte_set_x)
+SET_MEMORY_FN(nx, pte_set_nx)
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
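
For reference, PTE_SET_FN(ro, pte_wrprotect) expands to a pte_fn_t callback that apply_to_page_range() can walk a range with:

        static int pte_set_ro(pte_t *ptep, pgtable_t token, unsigned long addr,
                                void *data)
        {
                pte_t pte = pte_wrprotect(*ptep);

                set_pte_ext(ptep, pte, 0);
                return 0;
        }

and the generated set_memory_*() helpers are used roughly like this (a sketch; 'page_addr' is a hypothetical page-aligned address inside [MODULES_VADDR, MODULES_END)):

        int err;

        err = set_memory_ro(page_addr, 1);	/* write-protect one page */
        if (err)
                return err;
        /* ... */
        err = set_memory_rw(page_addr, 1);	/* make it writable again */
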
@@ -410,6 +461,9 @@ static void __init build_mem_type_table(void)
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+
+                       /* Also mark normal RW memory as non-executable */
+                       mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
                }
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /*
@@ -458,7 +512,18 @@ static void __init build_mem_type_table(void)
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
        s2_pgprot = cp->pte_s2;
-       hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+       hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+       s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
+
+       /*
+        * We don't use domains on ARMv6 (since this causes problems with
+        * v6/v7 kernels), so we must use a separate memory type for user
+        * r/o, kernel r/w to map the vectors page.
+        */
+#ifndef CONFIG_ARM_LPAE
+       if (cpu_arch == CPU_ARCH_ARMv6)
+               vecs_pgprot |= L_PTE_MT_VECTORS;
+#endif
 
        /*
         * ARMv6 and above have extended page tables.
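
L_PTE_MT_VECTORS selects a dedicated memory type whose set_pte_ext() handling yields user r/o, kernel r/w, which is how the vectors page stays kernel-writable once ARMv6 stops using domains. Later in build_mem_type_table() (unchanged by this diff) the vectors types pick the bits up along the lines of:

        mem_types[MT_LOW_VECTORS].prot_pte  |= vecs_pgprot;
        mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
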
@@ -487,11 +552,13 @@ static void __init build_mem_type_table(void)
                        mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
                        mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-                       mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-                       mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-                       mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-                       mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
                }
        }
 
@@ -502,15 +569,15 @@ static void __init build_mem_type_table(void)
        if (cpu_arch >= CPU_ARCH_ARMv6) {
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /* Non-cacheable Normal is XCB = 001 */
-                       mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+                       mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
                                PMD_SECT_BUFFERED;
                } else {
                        /* For both ARMv6 and non-TEX-remapping ARMv7 */
-                       mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+                       mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
                                PMD_SECT_TEX(1);
                }
        } else {
-               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+               mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
        }
 
 #ifdef CONFIG_ARM_LPAE
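
For reference (recalled from pgtable-2level-hwdef.h, not part of this diff): PMD_SECT_BUFFERED is an alias for the B bit alone, so under TEX remap the section reads TEX[0]=0, C=0, B=1, the XCB = 001 index the comment names; PMD_SECT_TEX(1) with C=B=0 is the classic (non-remap) encoding of Normal non-cacheable memory:

        #define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE)	/* XCB = 001 under TEX remap */
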
@@ -543,10 +610,12 @@ static void __init build_mem_type_table(void)
 
        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
-       mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
-       mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
-       mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+       mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
 
        switch (cp->pmd) {
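
Schematically, after this hunk the section descriptor for normal RW memory is the OR of everything accumulated so far; assuming the ARMv7 SMP path with XN enabled above, that is roughly:

        /* sketch, not literal code: final MT_MEMORY_RW section bits */
        PMD_TYPE_SECT | PMD_SECT_AP_WRITE	/* base, from the table */
                      | PMD_SECT_XN		/* NX setup earlier */
                      | PMD_SECT_S		/* SMP shareable */
                      | ecc_mask | cp->pmd	/* ECC + cache policy */
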
@@ -1296,6 +1365,8 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
+       unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+       unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
@@ -1308,12 +1379,40 @@ static void __init map_lowmem(void)
                if (start >= end)
                        break;
 
-               map.pfn = __phys_to_pfn(start);
-               map.virtual = __phys_to_virt(start);
-               map.length = end - start;
-               map.type = MT_MEMORY;
+               if (end < kernel_x_start || start >= kernel_x_end) {
+                       map.pfn = __phys_to_pfn(start);
+                       map.virtual = __phys_to_virt(start);
+                       map.length = end - start;
+                       map.type = MT_MEMORY_RWX;
 
-               create_mapping(&map);
+                       create_mapping(&map);
+               } else {
+                       /* Range overlaps the kernel image: keep only the text/init window executable */
+                       if (start < kernel_x_start) {
+                               map.pfn = __phys_to_pfn(start);
+                               map.virtual = __phys_to_virt(start);
+                               map.length = kernel_x_start - start;
+                               map.type = MT_MEMORY_RW;
+
+                               create_mapping(&map);
+                       }
+
+                       map.pfn = __phys_to_pfn(kernel_x_start);
+                       map.virtual = __phys_to_virt(kernel_x_start);
+                       map.length = kernel_x_end - kernel_x_start;
+                       map.type = MT_MEMORY_RWX;
+
+                       create_mapping(&map);
+
+                       if (kernel_x_end < end) {
+                               map.pfn = __phys_to_pfn(kernel_x_end);
+                               map.virtual = __phys_to_virt(kernel_x_end);
+                               map.length = end - kernel_x_end;
+                               map.type = MT_MEMORY_RW;
+
+                               create_mapping(&map);
+                       }
+               }
        }
 }
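
A worked example of the three-way split with hypothetical numbers, assuming a non-LPAE SECTION_SIZE of 1 MiB: if _stext sits at physical 0x60008000 and __init_end at 0x60700000, then kernel_x_start = round_down(0x60008000, SZ_1M) = 0x60000000 and kernel_x_end = round_up(0x60700000, SZ_1M) = 0x60700000. A memblock covering 0x60000000..0x70000000 then maps as:

        0x60000000 - 0x60700000   MT_MEMORY_RWX	/* rounded kernel text + init */
        0x60700000 - 0x70000000   MT_MEMORY_RW	/* rest of lowmem, non-executable */

with no leading MT_MEMORY_RW piece, since here start == kernel_x_start.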