/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although in different contexts:
 *   VALID marks that a TLB entry exists, and that can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits.
 *   This is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
 *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -This speeds up page table allocation itself, as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the rest 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm-generic/pgtable-nopmd.h>

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined forms a translation entry),
 *      while from the PTE perspective they are 8 and 9 respectively.
 * With MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss handlers)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_MODIFIED      (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */
#endif

/* vmalloc permissions */
#define _K_PAGE_PERMS	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			 _PAGE_GLOBAL | _PAGE_PRESENT)

#ifdef CONFIG_ARC_CACHE_PAGES
#define _PAGE_DEF_CACHEABLE	_PAGE_CACHEABLE
#else
#define _PAGE_DEF_CACHEABLE	(0)
#endif

/* Helper for every "user" page
 * -kernel can R/W/X
 * -by default cached, unless config otherwise
 * -present in memory
 */
#define ___DEF		(_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)

/* Set of bits not changed in pte_modify() */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)

/* More abbreviated helpers */
#define PAGE_U_NONE	__pgprot(___DEF)
#define PAGE_U_R	__pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R	__pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						_PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
 * user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL		__pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE	__pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 * which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W, unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable the COW mechanism
 */
	/* xwr */
#define __P000	PAGE_U_NONE
#define __P001	PAGE_U_R
#define __P010	PAGE_U_R	/* Pvt-W => !W */
#define __P011	PAGE_U_R	/* Pvt-W => !W */
#define __P100	PAGE_U_X_R	/* X => R */
#define __P101	PAGE_U_X_R
#define __P110	PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111	PAGE_U_X_R	/* Pvt-W => !W */

#define __S000	PAGE_U_NONE
#define __S001	PAGE_U_R
#define __S010	PAGE_U_W_R	/* W => R */
#define __S011	PAGE_U_W_R
#define __S100	PAGE_U_X_R	/* X => R */
#define __S101	PAGE_U_X_R
#define __S110	PAGE_U_X_W_R	/* X => R */
#define __S111	PAGE_U_X_W_R

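/*
 * For instance, a MAP_PRIVATE PROT_READ|PROT_WRITE mapping starts out as
 * __P011 == PAGE_U_R, i.e. with _PAGE_WRITE clear. The first store faults,
 * and the generic COW handler hands the task a private copy of the page
 * before making the PTE writable (see pte_mkwrite() below).
 */
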
/****************************************************************
 * Page Table Lookup split
 *
 * We implement 2 tier paging and since this is all software, we are free
 * to customize the span of a PGD / PTE entry to suit us
 *
 *			32 bit virtual address
 * -------------------------------------------------------
 * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  |
 *       |                  ---> index into Page Table
 *       |
 *       ----> index into Page Directory
 */

#define BITS_IN_PAGE	PAGE_SHIFT

/* Optimal Sizing of Pg Tbl - based on MMU page size */
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
#define BITS_FOR_PTE	8
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
#define BITS_FOR_PTE	8
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
#define BITS_FOR_PTE	9
#endif

#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)

#define PGDIR_SHIFT	(BITS_FOR_PTE + BITS_IN_PAGE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#ifdef __ASSEMBLY__
#define	PTRS_PER_PTE	(1 << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1 << BITS_FOR_PGD)
#else
#define	PTRS_PER_PTE	(1UL << BITS_FOR_PTE)
#define	PTRS_PER_PGD	(1UL << BITS_FOR_PGD)
#endif
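
/*
 * A worked example (assuming the default 8K page config):
 *   BITS_IN_PAGE = 13, BITS_FOR_PTE = 8  =>  BITS_FOR_PGD = 11
 *   PGDIR_SHIFT  = 21, so each PGD entry spans 1UL << 21 = 2M of vaddr
 *   PTRS_PER_PTE = 256 entries x 4 bytes = the 1K Pg-Tbl memset noted above
 *   PTRS_PER_PGD = 2048 entries x 4 bytes = 8K Page Directory
 */
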
/*
 * Number of entries a userland program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

210/*
211 * No special requirements for lowest virtual address we permit any user space
212 * mapping to be mapped at.
213 */
d016bf7e 214#define FIRST_USER_ADDRESS 0UL
5dda4dc5
VG
215
216
/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)		(!pmd_val(x))
#define	pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)
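
/*
 * As noted in the April 2010 changelog above, a PGD/PMD entry carries no
 * flag bits: it is either 0 (empty) or a plain Pg-Tbl pointer, which is why
 * pmd_present() can be a simple non-zero test.
 */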

#define pte_page(x) (mem_map + \
		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
				PAGE_SHIFT)))

#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
	pte;								\
})

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
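
/*
 * Round-trip sketch (illustrative numbers, 8K pages): a PTE for paddr
 * 0x81002000 has pte_pfn() = 0x81002000 >> 13 = 0x40801, and
 * pfn_pte(0x40801, prot) rebuilds the PTE, with the low PAGE_SHIFT bits
 * carrying the protection flags.
 */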

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) + \
					__pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)	pte_offset(dir, addr)
#define pte_offset_map(dir, addr)	pte_offset(dir, addr)
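
/*
 * Putting the pieces together, a sketch of a full 2-level lookup (the PMD
 * level is folded away by pgtable-nopmd, so a PGD entry doubles as the PMD):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pte_t *pte = pte_offset((pmd_t *)pgd, addr);
 *	if (pte_present(*pte))
 *		pfn = pte_pfn(*pte);
 */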

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(0)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_MODIFIED));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_MODIFIED));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
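
/*
 * e.g. PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)) expands to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) &= ~(_PAGE_WRITE);
 *		return pte;
 *	}
 */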

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}
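
/*
 * With a purely software managed TLB (see "Page Table Flags" above) there is
 * no hardware walker to synchronise with, so set_pte_at() needs nothing
 * beyond the plain store done by set_pte().
 */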

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd) + pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to Page Directory of "current" task
 * in a MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of NON "current"
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP

#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})

#else

#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)

#endif

extern void paging_init(void);
extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for the 5-bit @type, keeping bits 12-5 zero, ensuring
 * that PAGE_PRESENT is zero in a PTE holding a swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
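
/*
 * A worked example (hypothetical values): __swp_entry(3, 0x42) yields
 * val = 0x3 | (0x42 << 13) = 0x84003; decoding recovers
 * __swp_type() = 0x84003 & 0x1f = 3 and __swp_offset() = 0x84003 >> 13 = 0x42.
 * Bits 5-12 stay zero, so _PAGE_PRESENT (bit 9 or 10) is never set.
 */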

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* __ASSEMBLY__ */

#endif