/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *     They are semantically the same although in different contexts:
 *     VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits.
 *     This is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *     to avoid a few shifts in TLB Miss handlers
 *
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise it has
 *   the Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become simpler
 *
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself, as we now have to memset 1K
 *   instead of 8K per page table
 *  -TODO: Right now page table alloc is 8K and the rest (7K) is unused
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */
#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>
#include <linux/const.h>
/**************************************************************************
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
 *      separate PD0 and PD1, which combined forms a translation entry),
 *      while from the PTE perspective, they are 8 and 9 respectively;
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */
#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */
#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)

#endif	/* CONFIG_ARC_MMU_VER */
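/*
 * Worked example (illustrative only, using the MMU v3 layout above): a
 * present, cached, user read+write page that has been both referenced and
 * written has CACHEABLE|WRITE|READ|ACCESSED|DIRTY|PRESENT set, i.e.
 *	(1<<0)|(1<<2)|(1<<3)|(1<<4)|(1<<5)|(1<<9) == 0x23d
 * Only the (H) bits are consumed by hardware; the (S) bits such as
 * ACCESSED/DIRTY exist purely for Linux VM bookkeeping.
 */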
/* vmalloc permissions */
#define _K_PAGE_PERMS	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			 _PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE	0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif
/* Defaults for every user page */
#define ___DEF		(_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/* More abbreviated helpers */
#define PAGE_U_NONE	__pgprot(___DEF)
#define PAGE_U_R	__pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R	__pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R
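/*
 * e.g. PAGE_U_X_R is ___DEF | _PAGE_READ | _PAGE_EXECUTE, which with the
 * MMU v3 layout works out to (1<<9)|(1<<0)|(1<<3)|(1<<1) == 0x20b: a
 * present, cached page the user may read and execute but not write.
 */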
/*
 * While the kernel itself runs out of untranslated space, vmalloc/modules
 * use a chunk of user vaddr space - visible in all addr spaces, but kernel
 * mode only.
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL		__pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

#define PAGE_KERNEL_NO_CACHE	__pgprot(_K_PAGE_PERMS)
/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
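/*
 * Sketch of how the TLB refill code (the real thing is the assembly in
 * arch/arc/mm/tlbex.S) splits a PTE across the two descriptor words
 * (illustrative only):
 *
 *	pd0 = (vaddr & PAGE_MASK) | asid | (pte_val(pte) & PTE_BITS_IN_PD0);
 *	pd1 = pte_val(pte) & (PTE_BITS_NON_RWX_IN_PD1 | PTE_BITS_RWX);
 */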
/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 * which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 * 1. Although ARC700 can do exclusive execute/write protection (meaning R
 *    can be tracked independently of X/W, unlike some other CPUs), we still
 *    keep things consistent with other archs:
 *      -Write implies Read: W => R
 *      -Execute implies Read: X => R
 *
 * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *    This is to enable the COW mechanism
 */
#define __P000	PAGE_U_NONE
#define __P001	PAGE_U_R
#define __P010	PAGE_U_R	/* Pvt-W => !W */
#define __P011	PAGE_U_R	/* Pvt-W => !W */
#define __P100	PAGE_U_X_R	/* X => R */
#define __P101	PAGE_U_X_R
#define __P110	PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111	PAGE_U_X_R	/* Pvt-W => !W */

#define __S000	PAGE_U_NONE
#define __S001	PAGE_U_R
#define __S010	PAGE_U_W_R	/* W => R */
#define __S011	PAGE_U_W_R
#define __S100	PAGE_U_X_R	/* X => R */
#define __S101	PAGE_U_X_R
#define __S110	PAGE_U_X_W_R	/* X => R */
#define __S111	PAGE_U_X_W_R
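/*
 * Worked example (illustrative): a MAP_PRIVATE PROT_READ|PROT_WRITE mmap
 * selects __P011 above, i.e. PAGE_U_R, so the PTE deliberately starts out
 * !W. The first store faults, and only after the generic COW handler has
 * copied the page does it make the new PTE writable, roughly:
 *
 *	pte = mk_pte(new_page, vma->vm_page_prot);
 *	pte = pte_mkwrite(pte_mkdirty(pte));
 */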
/****************************************************************
 * Page Table Lookup split
 *
 * We implement 2 tier paging and since this is all software, we are free
 * to customize the span of a PGD / PTE entry to suit us
 *
 *			32 bit virtual address
 * -------------------------------------------------------
 * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 */

#define BITS_IN_PAGE	PAGE_SHIFT
/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE	8	/* 11:8:13 */
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE	8	/* 10:8:14 */
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE	9	/* 11:9:12 */
#endif

#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)
#define PGDIR_SHIFT	(32 - BITS_FOR_PGD)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define	PTRS_PER_PTE	_BITUL(BITS_FOR_PTE)
#define	PTRS_PER_PGD	_BITUL(BITS_FOR_PGD)
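/*
 * Worked example (illustrative), assuming the default 8K page, i.e. the
 * 11:8:13 split above:
 *	PGDIR_SHIFT = 21, PGDIR_SIZE = 2M, PTRS_PER_PGD = 2048, PTRS_PER_PTE = 256
 * so a vaddr of 0x12345678 decomposes as:
 *	index into Page Directory: 0x12345678 >> 21          = 0x091
 *	index into Page Table:     (0x12345678 >> 13) & 0xff = 0x0a2
 *	offset in page frame:      0x12345678 & 0x1fff       = 0x1678
 */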
/*
 * Number of entries a userland program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
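/*
 * e.g. with the usual ARC TASK_SIZE of 0x60000000 (defined elsewhere) and
 * the 2M PGDIR_SIZE above, this works out to 768 user slots of the 2048
 * PGD entries.
 */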
/*
 * No special requirement on the lowest virtual address: we permit any user
 * space mapping to be mapped at address 0.
 */
#define FIRST_USER_ADDRESS	0UL
/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)
/* find the page descriptor of the Page Tbl referred to by the PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl referred to by the PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}
#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)
#define pte_page(x) (mem_map + \
		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
				PAGE_SHIFT)))

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
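/*
 * Illustrative round trip: the PFN lives in the PTE bits above PAGE_SHIFT
 * and the protection bits below it, so for any pfn/prot pair
 *	pte_pfn(pfn_pte(pfn, prot)) == pfn
 * while the pgprot bits ride along untouched in the low bits.
 */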
/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)	pte_offset(dir, addr)
#define pte_offset_map(dir, addr)	pte_offset(dir, addr)
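/*
 * Full walk sketch (illustrative; locking and validity checks omitted).
 * With <asm-generic/pgtable-nopmd.h> the middle level folds back into the
 * PGD, so the PGD entry can be handed to pte_offset() directly:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pte_t *ptep = pte_offset((pmd_t *)pgd, addr);
 *	pte_t pte = *ptep;
 */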
/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)
#define PTE_BIT_FUNC(fn, op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,		&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,		|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,		&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,		|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,		&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,		|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,		&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,		|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,		|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,		|= (_PAGE_HW_SZ));
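/*
 * e.g. PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)) above expands to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) &= ~(_PAGE_WRITE);
 *		return pte;
 *	}
 */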
#define __HAVE_ARCH_PTE_SPECIAL
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
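/*
 * e.g. an mprotect() style transition to read-only (illustrative): the new
 * protection bits replace the old ones, while _PAGE_CHG_MASK preserves the
 * PFN and the accessed/dirty state:
 *
 *	pte = pte_modify(pte, PAGE_U_R);
 */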
/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}
/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))
/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to Page Directory of "current" task
 * in an MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of NON "current"
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif
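/*
 * Usage sketch (illustrative): fault handling code, which by construction
 * runs on behalf of "current", may use the fast variant:
 *
 *	pgd_t *pgd = pgd_offset_fast(current->mm, fault_addr);
 *
 * whereas code operating on a foreign mm must use pgd_offset(mm, addr).
 */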
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);
/*
 * Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for the 5-bit @type, keeping bits 12-5 zero, ensuring
 * that PAGE_PRESENT is zero in a PTE holding a swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing a swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
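/*
 * Worked example (illustrative): __swp_entry(2, 0x1234) yields
 *	0x2 | (0x1234 << 13) == 0x2468002
 * from which __swp_type() recovers 2 and __swp_offset() recovers 0x1234;
 * the _PAGE_PRESENT bit stays clear, so such a PTE is never mistaken for
 * a live translation.
 */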
#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_ARC_PGTABLE_H */