/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>

#include <asm/pgtable-bits.h>

#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

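/*
 * Worked example (a sketch, not part of the ABI): with 4 KiB pages,
 * sizeof(pgd_t) is 8 bytes on RV64 and 4 bytes on RV32, so PTRS_PER_PGD
 * evaluates to 512 and 1024 respectively; the same arithmetic applies
 * to PTRS_PER_PTE.
 */
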
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ	\
				| _PAGE_WRITE	\
				| _PAGE_PRESENT	\
				| _PAGE_ACCESSED	\
				| _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

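/*
 * Worked example (informational only): a MAP_PRIVATE mapping created with
 * PROT_READ | PROT_WRITE selects __P011, i.e. PAGE_COPY, which is a
 * read-only pgprot; the first write then faults and is resolved by
 * copy-on-write rather than by setting _PAGE_WRITE up front.
 */
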
/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	(VMALLOC_START - 1)
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)

#define vmemmap		((struct page *)VMEMMAP_START)

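/*
 * Worked example (a sketch; assumes Sv39 with CONFIG_VA_BITS = 39,
 * PAGE_SHIFT = 12 and STRUCT_PAGE_MAX_SHIFT = 6, i.e. a struct page of
 * at most 64 bytes): half the VA space is 2^38 bytes = 2^26 pages, and
 * 2^26 struct pages of 2^6 bytes each give VMEMMAP_SHIFT = 32, so the
 * vmemmap region spans 4 GiB directly below VMALLOC_START.
 */
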
/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Locate an entry in the page global directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
{
	return mm->pgd + pgd_index(addr);
}
/* Locate an entry in the kernel page global directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, (addr))

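/*
 * Worked example (a sketch; assumes Sv39, where PGDIR_SHIFT = 30 and
 * PTRS_PER_PGD = 512): pgd_index() extracts VA bits [38:30], so for
 * addr = 0x40201000 it yields (0x40201000 >> 30) & 511 = 1, i.e. the
 * second entry of the PGD.
 */
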
static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

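/*
 * Worked example (informational; assumes _PAGE_PFN_SHIFT = 10, since PTE
 * bits [9:0] hold the permission/RSW flags): pfn_pte(0x80200, prot)
 * places the PFN at bit 10, i.e. 0x80200 << 10 = 0x20080000, OR'd with
 * pgprot_val(prot).
 */
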
static inline pte_t mk_pte(struct page *page, pgprot_t prot)
{
	return pfn_pte(page_to_pfn(page), prot);
}

#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
}

#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			((void)(pte))

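/*
 * Lookup sketch (illustrative only; the upper-level folding helpers come
 * from <asm-generic/pgtable-nopud.h>, so the exact chain of *_offset()
 * calls depends on the generic headers in use): starting from
 * pgd_offset(mm, addr), fold down to the pmd, then
 * pte_offset_kernel(pmd, addr) indexes the last-level table and
 * pte_pfn(*pte) recovers the physical frame backing addr.
 */
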
static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

/*
 * A RISC-V PTE is a leaf whenever any of R/W/X is set; there is no
 * dedicated huge-page bit, so a present entry with permissions at a
 * non-terminal level is by construction a huge page.
 */
static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

/*
 * Keeps the instruction cache coherent with a page that is about to be
 * mapped executable; see set_pte_at() below.
 */
void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

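/*
 * Worked example (pure arithmetic on the macros above):
 * __swp_entry(1, 0x100) yields (1 << 2) | (0x100 << 7) = 0x8004, which
 * decodes back as __swp_type() = 1 and __swp_offset() = 0x100. The low
 * two bits stay zero, so both the hardware and pte_present() treat the
 * PTE as not present.
 */
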
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1) /* FIXME */
#endif

extern void *dtb_early_va;
extern void setup_bootmem(void);
extern void paging_init(void);

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(PAGE_OFFSET - 1)
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

#define FIXADDR_TOP	VMALLOC_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE	PMD_SIZE
#else
#define FIXADDR_SIZE	PGDIR_SIZE
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE	FIXADDR_START
#endif

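/*
 * Checking the RV64 arithmetic (assumes Sv39): PGDIR_SIZE = 2^30 and
 * PTRS_PER_PGD = 512, so TASK_SIZE = 2^30 * 512 / 2 = 2^38 =
 * 0x4000000000, matching the comment above and giving user space the
 * lower half of the Sv39 address range.
 */
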
#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */