// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: %s: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <asm/pgalloc.h>

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries do not change the lower 4 bits. It does
 * not affect any other platform.
 */
#define S390_MASK_BITS	4
#define RANDOM_ORVALUE	GENMASK(BITS_PER_LONG - 1, S390_MASK_BITS)
#define RANDOM_NZVALUE	GENMASK(7, 0)

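/*
 * Basic operations
 *
 * Each helper is exercised as a round trip: setting an attribute and then
 * testing for it (e.g. pte_mkyoung() followed by pte_young()) must observe
 * the attribute, while clearing it (e.g. pte_mkold()) must make the same
 * test fail.
 */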
static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd = pfn_pmd(pfn, prot);

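	/*
	 * CONFIG_TRANSPARENT_HUGEPAGE may be enabled while the running
	 * CPU lacks huge page support, so bail out in that case.
	 */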
	if (!has_transparent_hugepage())
		return;

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud = pfn_pud(pfn, prot);

	if (!has_transparent_hugepage())
		return;

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));

	/*
	 * No mm_struct is available in this context, so check for a
	 * folded PMD level with the preprocessor rather than with
	 * mm_pmd_folded().
	 */
#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * A huge page does not point to next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
#endif /* __PAGETABLE_PMD_FOLDED */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

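/*
 * Unlike the lower levels, there are no generic pfn_p4d() or pfn_pgd()
 * helpers to build entries from. The p4d and pgd basic tests therefore
 * only fill the entry with a non-zero pattern and verify pxx_same().
 */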
static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
{
	p4d_t p4d;

	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
{
	pgd_t pgd;

	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

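/*
 * The pxx_clear() tests load an entry with garbage (RANDOM_ORVALUE keeps
 * the low bits clear for s390, see above), clear it and verify that the
 * result qualifies as pxx_none(). The pxx_populate() tests install a next
 * level page table and verify that the entry does not qualify as pxx_bad().
 */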
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pmd_clear(pmdp);
	pud_clear(pudp);
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long vaddr)
{
	pte_t pte = READ_ONCE(*ptep);

	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = READ_ONCE(*ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	/*
	 * This entry points to next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_clear(pmdp);
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

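/*
 * Pick a random, page aligned address inside the user virtual address
 * range, so the page table walk below gets exercised at an arbitrary
 * location rather than a fixed one.
 */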
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

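/*
 * The test driver: allocate a fresh mm_struct, build a complete page table
 * walk for one random user address, run the basic, clear and populate tests
 * at every level, then tear everything down again.
 */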
static int __init debug_vm_pgtable(void)
{
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned, p4d_aligned, pgd_aligned;
	spinlock_t *uninitialized_var(ptl);

	pr_info("Validating architecture page table helpers\n");
	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid large memory block allocations to be used for mapping
	 * at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
	pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

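	/*
	 * Build the walk for vaddr: pgd_offset() locates the pgd entry,
	 * each pxx_alloc() installs the next level table if need be, and
	 * pte_alloc_map_lock() maps the PTE page and takes its lock.
	 */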
	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	ptep = pte_alloc_map_lock(mm, pmdp, vaddr, &ptl);

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	pte_basic_tests(pte_aligned, prot);
	pmd_basic_tests(pmd_aligned, prot);
	pud_basic_tests(pud_aligned, prot);
	p4d_basic_tests(p4d_aligned, prot);
	pgd_basic_tests(pgd_aligned, prot);

	pte_clear_tests(mm, ptep, vaddr);
	pmd_clear_tests(mm, pmdp);
	pud_clear_tests(mm, pudp);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);

	pte_unmap_unlock(ptep, ptl);

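	/*
	 * Reconnect each level with its saved next level table. An entry
	 * pointing to a page table page must not qualify as pxx_bad().
	 */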
	pmd_populate_tests(mm, pmdp, saved_ptep);
	pud_populate_tests(mm, pudp, saved_pmdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);
	return 0;
}
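/*
 * late_initcall() runs this test towards the end of kernel boot, once
 * core MM is fully initialized.
 */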
late_initcall(debug_vm_pgtable);