Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
1da177e4 | 2 | /* |
4baa9922 | 3 | * arch/arm/include/asm/pgalloc.h |
1da177e4 LT |
4 | * |
5 | * Copyright (C) 2000-2001 Russell King | |
1da177e4 LT |
6 | */ |
7 | #ifndef _ASMARM_PGALLOC_H | |
8 | #define _ASMARM_PGALLOC_H | |
9 | ||
97594b0f UKK |
10 | #include <linux/pagemap.h> |
11 | ||
74945c86 RK |
12 | #include <asm/domain.h> |
13 | #include <asm/pgtable-hwdef.h> | |
1da177e4 LT |
14 | #include <asm/processor.h> |
15 | #include <asm/cacheflush.h> | |
16 | #include <asm/tlbflush.h> | |
17 | ||
002547b4 RK |
18 | #define check_pgt_cache() do { } while (0) |
19 | ||
20 | #ifdef CONFIG_MMU | |
21 | ||
74945c86 RK |
22 | #define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER)) |
23 | #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) | |
24 | ||
da028779 CM |
25 | #ifdef CONFIG_ARM_LPAE |
26 | ||
27 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | |
28 | { | |
32d6bd90 | 29 | return (pmd_t *)get_zeroed_page(GFP_KERNEL); |
da028779 CM |
30 | } |
31 | ||
32 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |
33 | { | |
34 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | |
35 | free_page((unsigned long)pmd); | |
36 | } | |
37 | ||
/*
 * Point a PUD entry at the given PMD table (LPAE only).
 *
 * The PMD page's physical address is combined with the "table"
 * descriptor type bits and installed via set_pud().
 */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
}
42 | ||
43 | #else /* !CONFIG_ARM_LPAE */ | |
44 | ||
1da177e4 LT |
45 | /* |
46 | * Since we have only two-level page tables, these are trivial | |
47 | */ | |
48 | #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); }) | |
5e541973 | 49 | #define pmd_free(mm, pmd) do { } while (0) |
a32618d2 | 50 | #define pud_populate(mm,pmd,pte) BUG() |
1da177e4 | 51 | |
da028779 CM |
52 | #endif /* CONFIG_ARM_LPAE */ |
53 | ||
b0d03745 RK |
54 | extern pgd_t *pgd_alloc(struct mm_struct *mm); |
55 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | |
1da177e4 | 56 | |
75f296d9 | 57 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO) |
65cec8e3 | 58 | |
d30e45ee RK |
59 | static inline void clean_pte_table(pte_t *pte) |
60 | { | |
61 | clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE); | |
62 | } | |
63 | ||
1da177e4 LT |
64 | /* |
65 | * Allocate one PTE table. | |
66 | * | |
67 | * This actually allocates two hardware PTE tables, but we wrap this up | |
68 | * into one table thus: | |
69 | * | |
70 | * +------------+ | |
1da177e4 LT |
71 | * | Linux pt 0 | |
72 | * +------------+ | |
73 | * | Linux pt 1 | | |
74 | * +------------+ | |
d30e45ee RK |
75 | * | h/w pt 0 | |
76 | * +------------+ | |
77 | * | h/w pt 1 | | |
78 | * +------------+ | |
1da177e4 LT |
79 | */ |
80 | static inline pte_t * | |
4cf58924 | 81 | pte_alloc_one_kernel(struct mm_struct *mm) |
1da177e4 LT |
82 | { |
83 | pte_t *pte; | |
84 | ||
65cec8e3 | 85 | pte = (pte_t *)__get_free_page(PGALLOC_GFP); |
d30e45ee RK |
86 | if (pte) |
87 | clean_pte_table(pte); | |
1da177e4 LT |
88 | |
89 | return pte; | |
90 | } | |
91 | ||
2f569afd | 92 | static inline pgtable_t |
4cf58924 | 93 | pte_alloc_one(struct mm_struct *mm) |
1da177e4 LT |
94 | { |
95 | struct page *pte; | |
96 | ||
65cec8e3 RK |
97 | #ifdef CONFIG_HIGHPTE |
98 | pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0); | |
99 | #else | |
100 | pte = alloc_pages(PGALLOC_GFP, 0); | |
101 | #endif | |
affce508 KS |
102 | if (!pte) |
103 | return NULL; | |
104 | if (!PageHighMem(pte)) | |
105 | clean_pte_table(page_address(pte)); | |
106 | if (!pgtable_page_ctor(pte)) { | |
107 | __free_page(pte); | |
108 | return NULL; | |
1da177e4 | 109 | } |
1da177e4 LT |
110 | return pte; |
111 | } | |
112 | ||
113 | /* | |
114 | * Free one PTE table. | |
115 | */ | |
5e541973 | 116 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
1da177e4 | 117 | { |
d30e45ee | 118 | if (pte) |
1da177e4 | 119 | free_page((unsigned long)pte); |
1da177e4 LT |
120 | } |
121 | ||
/*
 * Free one user PTE table page.
 *
 * The pgtable destructor must run before the page is released, undoing
 * the pgtable_page_ctor() performed in pte_alloc_one().
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
127 | ||
/*
 * Install a PTE table into a PMD entry.
 *
 * @pmdp: pointer to the (first) PMD entry to write
 * @pte:  physical address of the Linux PTE page
 * @prot: table-descriptor protection bits (_PAGE_*_TABLE or
 *        user_pmd_table)
 *
 * The hardware tables start PTE_HWTABLE_OFF into the page (after the
 * Linux tables — see the layout diagram above).  On classic 2-level
 * ARM two consecutive entries are set, each covering 256 PTEs; LPAE
 * uses a single entry.  The entries are written first and then flushed
 * so the table walker observes them.
 */
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
				  pmdval_t prot)
{
	pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
	pmdp[0] = __pmd(pmdval);
#ifndef CONFIG_ARM_LPAE
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
#endif
	flush_pmd_entry(pmdp);
}
138 | ||
1da177e4 LT |
139 | /* |
140 | * Populate the pmdp entry with a pointer to the pte. This pmd is part | |
141 | * of the mm address space. | |
142 | * | |
143 | * Ensure that we always set both PMD entries. | |
144 | */ | |
145 | static inline void | |
146 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) | |
147 | { | |
1da177e4 | 148 | /* |
d30e45ee | 149 | * The pmd must be loaded with the physical address of the PTE table |
1da177e4 | 150 | */ |
d30e45ee | 151 | __pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE); |
1da177e4 LT |
152 | } |
153 | ||
154 | static inline void | |
2f569afd | 155 | pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) |
1da177e4 | 156 | { |
1d4d3715 JL |
157 | extern pmdval_t user_pmd_table; |
158 | pmdval_t prot; | |
159 | ||
160 | if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE)) | |
161 | prot = user_pmd_table; | |
162 | else | |
163 | prot = _PAGE_USER_TABLE; | |
164 | ||
165 | __pmd_populate(pmdp, page_to_phys(ptep), prot); | |
1da177e4 | 166 | } |
2f569afd | 167 | #define pmd_pgtable(pmd) pmd_page(pmd) |
1da177e4 | 168 | |
002547b4 RK |
169 | #endif /* CONFIG_MMU */ |
170 | ||
1da177e4 | 171 | #endif |