/*
 * Copyright (C) 2012 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>

#include <asm/pgtable-bits.h>

#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

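/*
 * For reference: with 4 KiB pages these work out to 512 entries per
 * table on RV64 (8-byte pgd_t/pte_t) and 1024 entries on RV32
 * (4-byte entries).
 */
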
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(0)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

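/*
 * Example: a MAP_PRIVATE, PROT_READ|PROT_WRITE mapping indexes __P011,
 * i.e. PAGE_COPY, so the page is initially mapped without _PAGE_WRITE
 * and the first store traps, letting the kernel copy-on-write it.  The
 * same protection bits with MAP_SHARED index __S011 (PAGE_SHARED),
 * which is writable from the start.
 */
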
/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PRESENT);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Locate an entry in the page global directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
{
	return mm->pgd + pgd_index(addr);
}
/* Locate an entry in the kernel page global directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, (addr))

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline pte_t mk_pte(struct page *page, pgprot_t prot)
{
	return pfn_pte(page_to_pfn(page), prot);
}

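/*
 * For example, mk_pte(page, PAGE_KERNEL) places the page's pfn in the
 * PPN field (bits _PAGE_PFN_SHIFT and up, i.e. bit 10 and up in the
 * Sv32/Sv39 PTE formats) and the kernel read/write protection bits in
 * the low bits.
 */
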
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
}

#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			((void)(pte))

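/*
 * Example: a software walk from a kernel virtual address to its pte
 * using the helpers above (a sketch only: it assumes a present,
 * non-huge mapping, with the pud level folded away by pgtable-nopud.h):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */
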
static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT);
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

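/*
 * In the RISC-V privileged spec, a PTE with any of the R/W/X bits set
 * is a leaf; a pointer to a next-level table has all three clear.  So
 * a present entry with R/W/X set at a non-leaf level maps a huge page.
 */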
static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

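/*
 * When a mapping becomes executable, flush_icache_pte() keeps the
 * instruction cache coherent: RISC-V does not require the I-cache to
 * observe ordinary stores, so a FENCE.I-based flush is needed before
 * newly written code can safely be executed through the new mapping.
 */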
static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

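/*
 * The clear below is done with an atomic exchange rather than a plain
 * read-then-write so that, on implementations that update the
 * accessed/dirty bits in hardware, a concurrent hardware update cannot
 * be lost between reading the old PTE and storing zero.
 */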
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0: _PAGE_PRESENT (zero)
 *	bit            1: reserved for future use (zero)
 *	bits      2 to 6: swap type
 *	bits 7 to XLEN-1: swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

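/*
 * Worked example: __swp_entry(1, 0x100) yields
 * (1 << 2) | (0x100 << 7) = 0x8004.  Bit 0 (_PAGE_PRESENT) stays zero,
 * so the entry can never be mistaken for a valid mapping, and
 * __swp_type() and __swp_offset() recover 1 and 0x100 from it.
 */
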
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1) /* FIXME */
#endif

extern void paging_init(void);

static inline void pgtable_cache_init(void)
{
	/* No page table caches to initialize */
}

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(PAGE_OFFSET - 1)
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

/*
 * Task size is 0x4000000000 for RV64 (PGDIR_SIZE * PTRS_PER_PGD / 2 =
 * 2^30 * 512 / 2 = 2^38 under Sv39) or 0xb800000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE VMALLOC_START
#endif

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */