mm: consolidate pgtable_cache_init() and pgd_cache_init()
[linux-2.6-block.git] / arch/unicore32/include/asm/pgtable.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/unicore32/include/asm/pgtable.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 */
#ifndef __UNICORE_PGTABLE_H__
#define __UNICORE_PGTABLE_H__

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/cpu-single.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		SZ_8M
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) \
				 & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		(0xff000000UL)
#endif
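
/*
 * Worked example of the rounding above (illustrative numbers, not from any
 * real board): with an 8MB-aligned high_memory of 0xc8000000, VMALLOC_START
 * becomes (0xc8000000 + 0x00800000) & ~0x007fffff == 0xc8800000, i.e.
 * exactly one VMALLOC_OFFSET above the end of lowmem. For an unaligned
 * high_memory the sum is rounded down to the previous 8MB boundary, so the
 * hole is at most 8MB but never empty.
 */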

#define PTRS_PER_PTE		1024
#define PTRS_PER_PGD		1024

/*
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PGDIR_SHIFT		22

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
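
/*
 * Consistency check on the constants above (arithmetic only, assuming the
 * usual 4kB PAGE_SIZE): PGDIR_SHIFT == 22 makes each of the
 * PTRS_PER_PGD == 1024 first-level entries cover PGDIR_SIZE == 4MB, so
 * 1024 * 4MB spans the whole 4GB address space, and each 4MB range is in
 * turn mapped by PTRS_PER_PTE == 1024 second-level entries of 4kB pages
 * (1024 * 4kB == 4MB).
 */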

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at. This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		22
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

#ifndef __ASSEMBLY__

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable bits based on memory policy, as well as any
 * architecture dependent bits.
 */
#define _PTE_DEFAULT		(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE)

extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;

#define PAGE_NONE		pgprot_user
#define PAGE_SHARED		__pgprot(pgprot_val(pgprot_user | PTE_READ \
								| PTE_WRITE))
#define PAGE_SHARED_EXEC	__pgprot(pgprot_val(pgprot_user | PTE_READ \
								| PTE_WRITE \
								| PTE_EXEC))
#define PAGE_COPY		__pgprot(pgprot_val(pgprot_user | PTE_READ))
#define PAGE_COPY_EXEC		__pgprot(pgprot_val(pgprot_user | PTE_READ \
								| PTE_EXEC))
#define PAGE_READONLY		__pgprot(pgprot_val(pgprot_user | PTE_READ))
#define PAGE_READONLY_EXEC	__pgprot(pgprot_val(pgprot_user | PTE_READ \
								| PTE_EXEC))
#define PAGE_KERNEL		pgprot_kernel
#define PAGE_KERNEL_EXEC	__pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))

#define __PAGE_NONE		__pgprot(_PTE_DEFAULT)
#define __PAGE_SHARED		__pgprot(_PTE_DEFAULT | PTE_READ \
					| PTE_WRITE)
#define __PAGE_SHARED_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
					| PTE_WRITE \
					| PTE_EXEC)
#define __PAGE_COPY		__pgprot(_PTE_DEFAULT | PTE_READ)
#define __PAGE_COPY_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
					| PTE_EXEC)
#define __PAGE_READONLY		__pgprot(_PTE_DEFAULT | PTE_READ)
#define __PAGE_READONLY_EXEC	__pgprot(_PTE_DEFAULT | PTE_READ \
					| PTE_EXEC)

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version. These get translated into the best that the
 * architecture can perform. Note that on UniCore hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000	__PAGE_NONE
#define __P001	__PAGE_READONLY
#define __P010	__PAGE_COPY
#define __P011	__PAGE_COPY
#define __P100	__PAGE_READONLY_EXEC
#define __P101	__PAGE_READONLY_EXEC
#define __P110	__PAGE_COPY_EXEC
#define __P111	__PAGE_COPY_EXEC

#define __S000	__PAGE_NONE
#define __S001	__PAGE_READONLY
#define __S010	__PAGE_SHARED
#define __S011	__PAGE_SHARED
#define __S100	__PAGE_READONLY_EXEC
#define __S101	__PAGE_READONLY_EXEC
#define __S110	__PAGE_SHARED_EXEC
#define __S111	__PAGE_SHARED_EXEC
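
/*
 * These __Pxxx/__Sxxx entries populate the generic protection_map[] (see
 * mm/mmap.c); the index is derived from vm_flags, with bit 0 = VM_READ,
 * bit 1 = VM_WRITE, bit 2 = VM_EXEC and bit 3 = VM_SHARED. For example, a
 * private PROT_READ|PROT_WRITE mapping selects protection_map[3] == __P011
 * == __PAGE_COPY: the write bit is withheld so that the first store faults
 * and triggers copy-on-write.
 */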

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) \
						| pgprot_val(prot)))

#define pte_none(pte)			(!pte_val(pte))
#define pte_clear(mm, addr, ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)			(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))

#define pte_offset_map(dir, addr)	(pmd_page_vaddr(*(dir)) \
						+ __pte_index(addr))
#define pte_unmap(pte)			do { } while (0)

#define set_pte(ptep, pte)	cpu_set_pte(ptep, pte)

#define set_pte_at(mm, addr, ptep, pteval)	\
	do {					\
		set_pte(ptep, pteval);		\
	} while (0)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_YOUNG)
#define pte_exec(pte)		(pte_val(pte) & PTE_EXEC)
#define pte_special(pte)	(0)

#define PTE_BIT_FUNC(fn, op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
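
/*
 * For reference, PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE) above expands to
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{ pte_val(pte) &= ~PTE_WRITE; return pte; }
 *
 * Each helper returns a modified copy of the PTE; callers must still write
 * it back (e.g. via set_pte_at()) for the change to reach the page table.
 */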

/*
 * Mark the prot value as uncacheable.
 */
#define pgprot_noncached(prot)		\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
#define pgprot_writecombine(prot)	\
	__pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd) & PMD_PRESENT)
#define pmd_bad(pmd)		(((pmd_val(pmd) &		\
				(PMD_PRESENT | PMD_TYPE_MASK))	\
				!= (PMD_PRESENT | PMD_TYPE_TABLE)))

#define set_pmd(pmdpd, pmdval)		\
	do {				\
		*(pmdpd) = pmdval;	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		set_pmd(pmdp, __pmd(0));\
		clean_pmd_entry(pmdp);	\
	} while (0)

#define pmd_page_vaddr(pmd)	((pte_t *)__va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
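
/*
 * Minimal sketch of a software walk using the helpers above (illustrative
 * only; a real walk must also honour locking, and because the pud/pmd
 * levels are folded by pgtable-nopmd.h the pgd entry doubles as the pmd
 * entry):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = (pmd_t *)pgd;		// folded levels
 *	pte_t *pte;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte = pte_offset_kernel(pmd, addr);
 *		if (pte_present(*pte))
 *			;	// physical frame is pte_pfn(*pte)
 *	}
 */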

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = PTE_EXEC | PTE_WRITE | PTE_READ;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
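
/*
 * Note that pte_modify() only touches the EXEC/WRITE/READ bits, so a
 * permission change (e.g. on behalf of mprotect()) preserves the
 * PRESENT/YOUNG/DIRTY state and the cacheability policy already in the
 * PTE. Illustrative use:
 *
 *	pte = pte_modify(pte, __PAGE_READONLY);	// keep read, drop write/exec
 */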

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry. Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------> <--- type --> 0 0 0 0 0
 *
 * This gives us up to 127 swap files and 32GB per swap file. Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	5
#define __SWP_TYPE_BITS		7
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT)	\
				& __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) {		\
				((type) << __SWP_TYPE_SHIFT) |	\
				((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
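
/*
 * Worked example (hypothetical values): __swp_entry(3, 0x1000) encodes to
 * (3 << 5) | (0x1000 << 12) == 0x01000060. The low five hardware bits stay
 * zero, so, with PTE_PRESENT living among them per the layout above, the
 * entry is never pte_present() and the MMU ignores it. Decoding gives
 * __swp_type() == (0x01000060 >> 5) & 0x7f == 3 and
 * __swp_offset() == 0x01000060 >> 12 == 0x1000.
 */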

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs. This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* __UNICORE_PGTABLE_H__ */