Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1965aae3 PA |
2 | #ifndef _ASM_X86_PGALLOC_H |
3 | #define _ASM_X86_PGALLOC_H | |
4f76cd38 JF |
4 | |
5 | #include <linux/threads.h> | |
6 | #include <linux/mm.h> /* for struct page */ | |
7 | #include <linux/pagemap.h> | |
8 | ||
5fba4af4 | 9 | #define __HAVE_ARCH_PTE_ALLOC_ONE |
f9cb654c | 10 | #define __HAVE_ARCH_PGD_FREE |
1355c31e | 11 | #include <asm-generic/pgalloc.h> |
5fba4af4 | 12 | |
eba0045f JF |
/*
 * Default (bare-metal) hook run when a new PGD is allocated.  There is
 * no hypervisor to notify, so it does nothing and reports success.
 */
static inline int __paravirt_pgd_alloc(struct mm_struct *mm)
{
	return 0;
}
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else	/* !CONFIG_PARAVIRT_XXL */
/*
 * Bare-metal stubs for the paravirt page-table hooks.  On native
 * hardware there is no hypervisor to tell about page-table page
 * allocation or release, so every hook is a no-op and pgd allocation
 * falls through to __paravirt_pgd_alloc().
 */
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif	/* CONFIG_PARAVIRT_XXL */
31 | ||
14315592 IC |
32 | /* |
33 | * Flags to use when allocating a user page table page. | |
34 | */ | |
35 | extern gfp_t __userpte_alloc_gfp; | |
36 | ||
ea4654e0 | 37 | #ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION |
d9e9a641 DH |
38 | /* |
39 | * Instead of one PGD, we acquire two PGDs. Being order-1, it is | |
40 | * both 8k in size and 8k-aligned. That lets us just flip bit 12 | |
41 | * in a pointer to swap between the two 4k halves. | |
42 | */ | |
43 | #define PGD_ALLOCATION_ORDER 1 | |
44 | #else | |
45 | #define PGD_ALLOCATION_ORDER 0 | |
46 | #endif | |
47 | ||
4f76cd38 JF |
48 | /* |
49 | * Allocate and free page tables. | |
50 | */ | |
51 | extern pgd_t *pgd_alloc(struct mm_struct *); | |
52 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | |
53 | ||
4cf58924 | 54 | extern pgtable_t pte_alloc_one(struct mm_struct *); |
4f76cd38 | 55 | |
9e1b32ca BH |
extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

/*
 * Hand a no-longer-needed PTE page over to the mmu_gather machinery
 * for deferred freeing.  The address argument is unused on x86.
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}
397f687a | 63 | |
170fdff7 JF |
64 | static inline void pmd_populate_kernel(struct mm_struct *mm, |
65 | pmd_t *pmd, pte_t *pte) | |
66 | { | |
6944a9c8 | 67 | paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); |
170fdff7 JF |
68 | set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); |
69 | } | |
70 | ||
0a9fe8ca DW |
71 | static inline void pmd_populate_kernel_safe(struct mm_struct *mm, |
72 | pmd_t *pmd, pte_t *pte) | |
73 | { | |
74 | paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); | |
75 | set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); | |
76 | } | |
77 | ||
170fdff7 JF |
78 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, |
79 | struct page *pte) | |
80 | { | |
81 | unsigned long pfn = page_to_pfn(pte); | |
82 | ||
6944a9c8 | 83 | paravirt_alloc_pte(mm, pfn); |
170fdff7 JF |
84 | set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); |
85 | } | |
86 | ||
98233368 | 87 | #if CONFIG_PGTABLE_LEVELS > 2 |
9e1b32ca BH |
88 | extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); |
89 | ||
90 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, | |
b595076a | 91 | unsigned long address) |
9e1b32ca BH |
92 | { |
93 | ___pmd_free_tlb(tlb, pmd); | |
94 | } | |
170fdff7 | 95 | |
5a5f8f42 JF |
96 | #ifdef CONFIG_X86_PAE |
97 | extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); | |
98 | #else /* !CONFIG_X86_PAE */ | |
99 | static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |
100 | { | |
6944a9c8 | 101 | paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); |
5a5f8f42 JF |
102 | set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); |
103 | } | |
0a9fe8ca DW |
104 | |
105 | static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |
106 | { | |
107 | paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT); | |
108 | set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd))); | |
109 | } | |
5a5f8f42 JF |
110 | #endif /* CONFIG_X86_PAE */ |
111 | ||
98233368 | 112 | #if CONFIG_PGTABLE_LEVELS > 3 |
f2a6a705 | 113 | static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) |
5a5f8f42 | 114 | { |
2761fa09 | 115 | paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); |
f2a6a705 | 116 | set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud))); |
5a5f8f42 JF |
117 | } |
118 | ||
0a9fe8ca DW |
119 | static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) |
120 | { | |
121 | paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT); | |
122 | set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud))); | |
123 | } | |
124 | ||
9e1b32ca BH |
125 | extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); |
126 | ||
127 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, | |
128 | unsigned long address) | |
129 | { | |
130 | ___pud_free_tlb(tlb, pud); | |
131 | } | |
132 | ||
f2a6a705 KS |
133 | #if CONFIG_PGTABLE_LEVELS > 4 |
134 | static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) | |
135 | { | |
ed7588d5 | 136 | if (!pgtable_l5_enabled()) |
98219dda | 137 | return; |
f2a6a705 KS |
138 | paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT); |
139 | set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); | |
140 | } | |
141 | ||
0a9fe8ca DW |
142 | static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) |
143 | { | |
144 | if (!pgtable_l5_enabled()) | |
145 | return; | |
146 | paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT); | |
147 | set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); | |
148 | } | |
149 | ||
f2a6a705 KS |
150 | static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) |
151 | { | |
152 | gfp_t gfp = GFP_KERNEL_ACCOUNT; | |
153 | ||
154 | if (mm == &init_mm) | |
155 | gfp &= ~__GFP_ACCOUNT; | |
156 | return (p4d_t *)get_zeroed_page(gfp); | |
157 | } | |
158 | ||
159 | static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) | |
160 | { | |
0e311d23 AR |
161 | if (!pgtable_l5_enabled()) |
162 | return; | |
163 | ||
f2a6a705 KS |
164 | BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); |
165 | free_page((unsigned long)p4d); | |
166 | } | |
167 | ||
168 | extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d); | |
169 | ||
170 | static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, | |
171 | unsigned long address) | |
172 | { | |
ed7588d5 | 173 | if (pgtable_l5_enabled()) |
98219dda | 174 | ___p4d_free_tlb(tlb, p4d); |
f2a6a705 KS |
175 | } |
176 | ||
177 | #endif /* CONFIG_PGTABLE_LEVELS > 4 */ | |
98233368 KS |
178 | #endif /* CONFIG_PGTABLE_LEVELS > 3 */ |
179 | #endif /* CONFIG_PGTABLE_LEVELS > 2 */ | |
4f76cd38 | 180 | |
1965aae3 | 181 | #endif /* _ASM_X86_PGALLOC_H */ |