/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

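/*
 * Each vmemmap_backing entry records one page used to back the virtual
 * memmap: its physical address and the virtual address it maps. Entries
 * are chained through @list so the backing pages can be found again
 * later, e.g. when tearing the vmemmap down on memory hot-remove.
 */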
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);
void pte_frag_destroy(void *pte_frag);

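/*
 * The radix PGD is 64K. With 64K pages a single page therefore holds it;
 * with 4K pages we need an order-4 (16 page) allocation, which is why
 * alloc_pages()/free_pages() below use order 4.
 */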
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

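/*
 * pgd_alloc() dispatches on the MMU mode: radix takes the page-based
 * path above, while the hash MMU keeps its PGD in a slab cache sized
 * by PGD_INDEX_SIZE.
 */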
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	if (unlikely(!pgd))
		return pgd;

	/*
	 * Don't scan the PGD for pointers, it contains references to PUDs but
	 * those references are not full pointers and so can't be recognised by
	 * kmemleak.
	 */
	kmemleak_no_scan(pgd);

	/*
	 * With hugetlb, we don't clear the second half of the page table.
	 * If we share the same slab cache with the PMD or PUD level table,
	 * we need to make sure we zero out the full table on alloc.
	 * With 4K pages no slot is stored in the second half, so the memset
	 * is not needed there.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

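/*
 * The *_populate() helpers below install a lower-level table into an
 * entry: __pgtable_ptr_val() yields the physical address to store in
 * the entry, and the *_VAL_BITS flags mark it as a valid pointer to the
 * next level.
 */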
static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
{
	*pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Tell kmemleak to ignore the PUD, that means don't scan it for
	 * pointers and don't consider it a leak. PUDs are typically only
	 * referred to by their PGD, but kmemleak is not able to recognise those
	 * as pointers, leading to false leak reports.
	 */
	kmemleak_ignore(pud);

	return pud;
}

static inline void __pud_free(pud_t *pud)
{
	struct page *page = virt_to_page(pud);

	/*
	 * Early PUD pages allocated via the memblock allocator can't be
	 * freed back to slab. KFENCE pages have both the reserved and the
	 * slab flag set, so the !PageSlab() check keeps them on the
	 * kmem_cache_free() path.
	 */
	if (PageReserved(page) && !PageSlab(page))
		free_reserved_page(page);
	else
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	return __pud_free(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	*pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

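/*
 * The __p*_free_tlb() helpers below hand a table to pgtable_free_tlb(),
 * which defers the actual free until the TLB has been flushed, so that
 * concurrent lockless page-table walkers never see a freed table. The
 * *_INDEX argument tells the callee which level the table belongs to.
 */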
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

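/*
 * PMD tables come from the fragment allocator, which carves several
 * PMD-sized tables out of a single page rather than spending a full
 * page on each table.
 */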
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	*pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	*pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}

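/*
 * Per-size accounting of linear-mapping ("direct map") pages, surfaced
 * via arch_report_meminfo() when CONFIG_PROC_FS is enabled. Using
 * IS_ENABLED() lets the compiler drop the update entirely otherwise.
 */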
extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
static inline void update_page_count(int psize, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[psize]);
}

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */