// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 *
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries, do not change the lower 4 bits. This does
 * not affect any other platform. Also avoid the 62nd bit on ppc64, which is
 * used to mark a pte entry.
 */
#define S390_SKIP_MASK	GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK	GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK	0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)

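/*
 * Illustration (added for clarity): on a 64-bit platform RANDOM_ORVALUE
 * works out to GENMASK(63, 0) with bits 3:0 (the s390 entry type bits)
 * and bit 62 (the ppc64 pte marker) masked off, i.e. 0xbffffffffffffff0.
 * ORing this into an entry scribbles over every bit the architectures
 * allow us to touch before a pxx_clear() test.
 */
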
struct pgtable_debug_args {
        struct mm_struct *mm;
        struct vm_area_struct *vma;

        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        p4d_t *start_p4dp;
        pud_t *start_pudp;
        pmd_t *start_pmdp;
        pgtable_t start_ptep;

        unsigned long vaddr;
        pgprot_t page_prot;
        pgprot_t page_prot_none;

        bool is_contiguous_page;
        unsigned long pud_pfn;
        unsigned long pmd_pfn;
        unsigned long pte_pfn;

        unsigned long fixed_pgd_pfn;
        unsigned long fixed_p4d_pfn;
        unsigned long fixed_pud_pfn;
        unsigned long fixed_pmd_pfn;
        unsigned long fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
        pgprot_t prot = vm_get_page_prot(idx);
        pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
        unsigned long val = idx, *ptr = &val;

        pr_debug("Validating PTE basic (%pGv)\n", ptr);

        /*
         * This test needs to be executed after the given page table entry
         * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
         * does not have the dirty bit enabled from the beginning. This is
         * important for platforms like arm64 where (!PTE_RDONLY) indicates
         * that the dirty bit is set.
         */
        WARN_ON(pte_dirty(pte_wrprotect(pte)));

        WARN_ON(!pte_same(pte, pte));
        WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
        WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
        WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
        WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
        WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
        WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
        WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
        WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

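/*
 * Note (added for clarity): unlike the basic tests above, which only
 * transform pte values, the advanced tests below operate on a live pte
 * actually installed in the test mm's page table, via the ptep_*
 * accessors. The caller, debug_vm_pgtable(), holds the pte lock taken
 * with pte_offset_map_lock() around this function, which is why no
 * locking appears here.
 */
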
static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
        struct page *page;
        pte_t pte;

        /*
         * Architectures optimize set_pte_at() by avoiding a TLB flush.
         * This requires that set_pte_at() is not used to update an
         * existing pte entry. Clear the pte before calling set_pte_at().
         *
         * flush_dcache_page() is called after set_pte_at() to clear
         * PG_arch_1 for the page on ARM64. The page flag isn't cleared
         * when the page is released, and the page allocation check will
         * fail when the page is allocated again. For architectures other
         * than ARM64, the unexpected overhead of cache flushing is
         * acceptable.
         */
        page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
        if (!page)
                return;

        pr_debug("Validating PTE advanced\n");
        pte = pfn_pte(args->pte_pfn, args->page_prot);
        set_pte_at(args->mm, args->vaddr, args->ptep, pte);
        flush_dcache_page(page);
        ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
        pte = ptep_get(args->ptep);
        WARN_ON(pte_write(pte));
        ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
        pte = ptep_get(args->ptep);
        WARN_ON(!pte_none(pte));

        pte = pfn_pte(args->pte_pfn, args->page_prot);
        pte = pte_wrprotect(pte);
        pte = pte_mkclean(pte);
        set_pte_at(args->mm, args->vaddr, args->ptep, pte);
        flush_dcache_page(page);
        pte = pte_mkwrite(pte);
        pte = pte_mkdirty(pte);
        ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
        pte = ptep_get(args->ptep);
        WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
        ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
        pte = ptep_get(args->ptep);
        WARN_ON(!pte_none(pte));

        pte = pfn_pte(args->pte_pfn, args->page_prot);
        pte = pte_mkyoung(pte);
        set_pte_at(args->mm, args->vaddr, args->ptep, pte);
        flush_dcache_page(page);
        ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
        pte = ptep_get(args->ptep);
        WARN_ON(pte_young(pte));

        ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
        pgprot_t prot = vm_get_page_prot(idx);
        unsigned long val = idx, *ptr = &val;
        pmd_t pmd;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD basic (%pGv)\n", ptr);
        pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

        /*
         * This test needs to be executed after the given page table entry
         * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
         * does not have the dirty bit enabled from the beginning. This is
         * important for platforms like arm64 where (!PTE_RDONLY) indicates
         * that the dirty bit is set.
         */
        WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

        WARN_ON(!pmd_same(pmd, pmd));
        WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
        WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
        WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
        WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
        WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
        WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
        WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
        WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
        /*
         * A huge page does not point to a next level page table
         * entry. Hence this must qualify as pmd_bad().
         */
        WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

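/*
 * Note (added for clarity): before a huge pmd can be installed, generic
 * MM expects a page table page to have been deposited for it with
 * pgtable_trans_huge_deposit(), so that a later split can reuse it.
 * pmd_advanced_tests() below follows that protocol: it deposits
 * args->start_ptep up front and withdraws it again at the end. The
 * caller holds the pmd lock (pmd_lock()) around this function.
 */
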
static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
        struct page *page;
        pmd_t pmd;
        unsigned long vaddr = args->vaddr;

        if (!has_transparent_hugepage())
                return;

        page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
        if (!page)
                return;

        /*
         * flush_dcache_page() is called after set_pmd_at() to clear
         * PG_arch_1 for the page on ARM64. The page flag isn't cleared
         * when the page is released, and the page allocation check will
         * fail when the page is allocated again. For architectures other
         * than ARM64, the unexpected overhead of cache flushing is
         * acceptable.
         */
        pr_debug("Validating PMD advanced\n");
        /* Align the address wrt HPAGE_PMD_SIZE */
        vaddr &= HPAGE_PMD_MASK;

        pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

        pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
        set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
        flush_dcache_page(page);
        pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
        pmd = READ_ONCE(*args->pmdp);
        WARN_ON(pmd_write(pmd));
        pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
        pmd = READ_ONCE(*args->pmdp);
        WARN_ON(!pmd_none(pmd));

        pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
        pmd = pmd_wrprotect(pmd);
        pmd = pmd_mkclean(pmd);
        set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
        flush_dcache_page(page);
        pmd = pmd_mkwrite(pmd);
        pmd = pmd_mkdirty(pmd);
        pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
        pmd = READ_ONCE(*args->pmdp);
        WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
        pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
        pmd = READ_ONCE(*args->pmdp);
        WARN_ON(!pmd_none(pmd));

        pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
        pmd = pmd_mkyoung(pmd);
        set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
        flush_dcache_page(page);
        pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
        pmd = READ_ONCE(*args->pmdp);
        WARN_ON(pmd_young(pmd));

        /* Clear the pmd entry and withdraw the deposited page table */
        pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
        pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
        pmd_t pmd;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD leaf\n");
        pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

        /*
         * PMD based THP is a leaf entry.
         */
        pmd = pmd_mkhuge(pmd);
        WARN_ON(!pmd_leaf(pmd));
}

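/*
 * Note (added for clarity): PUD level THP helpers only exist on
 * architectures that select CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 * (for example x86_64); everywhere else the stub definitions further
 * down compile these tests away.
 */
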
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
        pgprot_t prot = vm_get_page_prot(idx);
        unsigned long val = idx, *ptr = &val;
        pud_t pud;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PUD basic (%pGv)\n", ptr);
        pud = pfn_pud(args->fixed_pud_pfn, prot);

        /*
         * This test needs to be executed after the given page table entry
         * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
         * does not have the dirty bit enabled from the beginning. This is
         * important for platforms like arm64 where (!PTE_RDONLY) indicates
         * that the dirty bit is set.
         */
        WARN_ON(pud_dirty(pud_wrprotect(pud)));

        WARN_ON(!pud_same(pud, pud));
        WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
        WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
        WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
        WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
        WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
        WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
        WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
        WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

        if (mm_pmd_folded(args->mm))
                return;

        /*
         * A huge page does not point to a next level page table
         * entry. Hence this must qualify as pud_bad().
         */
        WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
        struct page *page;
        unsigned long vaddr = args->vaddr;
        pud_t pud;

        if (!has_transparent_hugepage())
                return;

        page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
        if (!page)
                return;

        /*
         * flush_dcache_page() is called after set_pud_at() to clear
         * PG_arch_1 for the page on ARM64. The page flag isn't cleared
         * when the page is released, and the page allocation check will
         * fail when the page is allocated again. For architectures other
         * than ARM64, the unexpected overhead of cache flushing is
         * acceptable.
         */
        pr_debug("Validating PUD advanced\n");
        /* Align the address wrt HPAGE_PUD_SIZE */
        vaddr &= HPAGE_PUD_MASK;

        pud = pfn_pud(args->pud_pfn, args->page_prot);
        set_pud_at(args->mm, vaddr, args->pudp, pud);
        flush_dcache_page(page);
        pudp_set_wrprotect(args->mm, vaddr, args->pudp);
        pud = READ_ONCE(*args->pudp);
        WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
        pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
        pud = READ_ONCE(*args->pudp);
        WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
        pud = pfn_pud(args->pud_pfn, args->page_prot);
        pud = pud_wrprotect(pud);
        pud = pud_mkclean(pud);
        set_pud_at(args->mm, vaddr, args->pudp, pud);
        flush_dcache_page(page);
        pud = pud_mkwrite(pud);
        pud = pud_mkdirty(pud);
        pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
        pud = READ_ONCE(*args->pudp);
        WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
        pudp_huge_get_and_clear_full(args->mm, vaddr, args->pudp, 1);
        pud = READ_ONCE(*args->pudp);
        WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

        pud = pfn_pud(args->pud_pfn, args->page_prot);
        pud = pud_mkyoung(pud);
        set_pud_at(args->mm, vaddr, args->pudp, pud);
        flush_dcache_page(page);
        pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
        pud = READ_ONCE(*args->pudp);
        WARN_ON(pud_young(pud));

        pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
        pud_t pud;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PUD leaf\n");
        pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
        /*
         * PUD based THP is a leaf entry.
         */
        pud = pud_mkhuge(pud);
        WARN_ON(!pud_leaf(pud));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

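/*
 * Note (added for clarity): pmd/pud_set_huge() and pmd/pud_clear_huge()
 * are the helpers used to install and tear down huge leaf mappings in
 * the kernel's vmalloc/ioremap area. arch_vmap_pmd_supported() and
 * arch_vmap_pud_supported() report whether the architecture can back a
 * huge vmap at that level with the given protection, so the tests below
 * bail out early where the capability is absent.
 */
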
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
        pmd_t pmd;

        if (!arch_vmap_pmd_supported(args->page_prot))
                return;

        pr_debug("Validating PMD huge\n");
        /*
         * The x86 implementation of pmd_set_huge() verifies that the
         * given PMD is not a populated non-leaf entry.
         */
        WRITE_ONCE(*args->pmdp, __pmd(0));
        WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
        WARN_ON(!pmd_clear_huge(args->pmdp));
        pmd = READ_ONCE(*args->pmdp);
        WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
        pud_t pud;

        if (!arch_vmap_pud_supported(args->page_prot))
                return;

        pr_debug("Validating PUD huge\n");
        /*
         * The x86 implementation of pud_set_huge() verifies that the
         * given PUD is not a populated non-leaf entry.
         */
        WRITE_ONCE(*args->pudp, __pud(0));
        WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
        WARN_ON(!pud_clear_huge(args->pudp));
        pud = READ_ONCE(*args->pudp);
        WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
        p4d_t p4d;

        pr_debug("Validating P4D basic\n");
        memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
        WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
        pgd_t pgd;

        pr_debug("Validating PGD basic\n");
        memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
        WARN_ON(!pgd_same(pgd, pgd));
}

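/*
 * Note (added for clarity): the p4d/pgd basic tests above only check
 * pxx_same(), so a RANDOM_NZVALUE byte-pattern memset is enough and no
 * entry needs to be built from a real pfn. The clear/populate tests
 * below are compiled out when the corresponding level is folded, since
 * the helpers then collapse into the level above.
 */
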
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
        pud_t pud = READ_ONCE(*args->pudp);

        if (mm_pmd_folded(args->mm))
                return;

        pr_debug("Validating PUD clear\n");
        pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
        WRITE_ONCE(*args->pudp, pud);
        pud_clear(args->pudp);
        pud = READ_ONCE(*args->pudp);
        WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
        pud_t pud;

        if (mm_pmd_folded(args->mm))
                return;

        pr_debug("Validating PUD populate\n");
        /*
         * This entry points to a next level page table page.
         * Hence this must not qualify as pud_bad().
         */
        pud_populate(args->mm, args->pudp, args->start_pmdp);
        pud = READ_ONCE(*args->pudp);
        WARN_ON(pud_bad(pud));
}
#else /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
        p4d_t p4d = READ_ONCE(*args->p4dp);

        if (mm_pud_folded(args->mm))
                return;

        pr_debug("Validating P4D clear\n");
        p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
        WRITE_ONCE(*args->p4dp, p4d);
        p4d_clear(args->p4dp);
        p4d = READ_ONCE(*args->p4dp);
        WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
        p4d_t p4d;

        if (mm_pud_folded(args->mm))
                return;

        pr_debug("Validating P4D populate\n");
        /*
         * This entry points to a next level page table page.
         * Hence this must not qualify as p4d_bad().
         */
        pud_clear(args->pudp);
        p4d_clear(args->p4dp);
        p4d_populate(args->mm, args->p4dp, args->start_pudp);
        p4d = READ_ONCE(*args->p4dp);
        WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
        pgd_t pgd = READ_ONCE(*(args->pgdp));

        if (mm_p4d_folded(args->mm))
                return;

        pr_debug("Validating PGD clear\n");
        pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
        WRITE_ONCE(*args->pgdp, pgd);
        pgd_clear(args->pgdp);
        pgd = READ_ONCE(*args->pgdp);
        WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
        pgd_t pgd;

        if (mm_p4d_folded(args->mm))
                return;

        pr_debug("Validating PGD populate\n");
        /*
         * This entry points to a next level page table page.
         * Hence this must not qualify as pgd_bad().
         */
        p4d_clear(args->p4dp);
        pgd_clear(args->pgdp);
        pgd_populate(args->mm, args->pgdp, args->start_p4dp);
        pgd = READ_ONCE(*args->pgdp);
        WARN_ON(pgd_bad(pgd));
}
#else /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */

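/*
 * Note (added for clarity): the clear tests OR RANDOM_ORVALUE into the
 * entry first, so that pxx_clear() is proven to work whatever garbage
 * the entry happens to hold (minus the architecture specific bits
 * excluded via ARCH_SKIP_MASK at the top of this file).
 */
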
static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
        struct page *page;
        pte_t pte;

        page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
        if (!page)
                return;

        /*
         * flush_dcache_page() is called after set_pte_at() to clear
         * PG_arch_1 for the page on ARM64. The page flag isn't cleared
         * when the page is released, and the page allocation check will
         * fail when the page is allocated again. For architectures other
         * than ARM64, the unexpected overhead of cache flushing is
         * acceptable.
         */
        pr_debug("Validating PTE clear\n");
        pte = pfn_pte(args->pte_pfn, args->page_prot);
#ifndef CONFIG_RISCV
        pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
        set_pte_at(args->mm, args->vaddr, args->ptep, pte);
        flush_dcache_page(page);
        barrier();
        ptep_clear(args->mm, args->vaddr, args->ptep);
        pte = ptep_get(args->ptep);
        WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
        pmd_t pmd = READ_ONCE(*args->pmdp);

        pr_debug("Validating PMD clear\n");
        pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
        WRITE_ONCE(*args->pmdp, pmd);
        pmd_clear(args->pmdp);
        pmd = READ_ONCE(*args->pmdp);
        WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
        pmd_t pmd;

        pr_debug("Validating PMD populate\n");
        /*
         * This entry points to a next level page table page.
         * Hence this must not qualify as pmd_bad().
         */
        pmd_populate(args->mm, args->pmdp, args->start_ptep);
        pmd = READ_ONCE(*args->pmdp);
        WARN_ON(pmd_bad(pmd));
}

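/*
 * Note (added for clarity): pte_special() tags pfn-based mappings that
 * have no struct page backing them (e.g. VM_PFNMAP users), telling GUP
 * and friends not to touch the page. It is only meaningful on
 * architectures that select CONFIG_ARCH_HAS_PTE_SPECIAL, hence the
 * IS_ENABLED() check below.
 */
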
static void __init pte_special_tests(struct pgtable_debug_args *args)
{
        pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

        if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
                return;

        pr_debug("Validating PTE special\n");
        WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
        pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

        if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
                return;

        pr_debug("Validating PTE protnone\n");
        WARN_ON(!pte_protnone(pte));
        WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
        pmd_t pmd;

        if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
                return;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD protnone\n");
        pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
        WARN_ON(!pmd_protnone(pmd));
        WARN_ON(!pmd_present(pmd));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
        pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

        pr_debug("Validating PTE devmap\n");
        WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
        pmd_t pmd;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD devmap\n");
        pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
        WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
        pud_t pud;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PUD devmap\n");
        pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
        WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

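/*
 * Note (added for clarity): soft dirty bits (CONFIG_MEM_SOFT_DIRTY)
 * let user space track which pages a task has written to, e.g. for
 * checkpoint/restore. The pte_swp_*/pmd_swp_* variants below check
 * that the bit also survives in swap entries, so the tracking is not
 * lost when a page gets swapped out.
 */
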
static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
        pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

        if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
                return;

        pr_debug("Validating PTE soft dirty\n");
        WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
        WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
        pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

        if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
                return;

        pr_debug("Validating PTE swap soft dirty\n");
        WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
        WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
        pmd_t pmd;

        if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
                return;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD soft dirty\n");
        pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
        WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
        WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
        pmd_t pmd;

        if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
            !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
                return;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD swap soft dirty\n");
        pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
        WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
        WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
#ifdef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
        pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

        pr_debug("Validating PTE swap exclusive\n");
        pte = pte_swp_mkexclusive(pte);
        WARN_ON(!pte_swp_exclusive(pte));
        pte = pte_swp_clear_exclusive(pte);
        WARN_ON(pte_swp_exclusive(pte));
#endif /* __HAVE_ARCH_PTE_SWP_EXCLUSIVE */
}

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
        swp_entry_t swp;
        pte_t pte;

        pr_debug("Validating PTE swap\n");
        pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
        swp = __pte_to_swp_entry(pte);
        pte = __swp_entry_to_pte(swp);
        WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
        swp_entry_t swp;
        pmd_t pmd;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD swap\n");
        pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
        swp = __pmd_to_swp_entry(pmd);
        pmd = __swp_entry_to_pmd(swp);
        WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
        struct page *page;
        swp_entry_t swp;

        if (!IS_ENABLED(CONFIG_MIGRATION))
                return;

        /*
         * swap_migration_tests() requires a dedicated page as it needs to
         * be locked before creating a migration entry from it. Locking the
         * page that actually maps kernel text ('start_kernel') can be
         * really problematic. Let's use the allocated page explicitly for
         * this purpose.
         */
        page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
        if (!page)
                return;

        pr_debug("Validating swap migration\n");

        /*
         * make_[readable|writable]_migration_entry() expects the given page
         * to be locked, otherwise it stumbles upon a BUG_ON().
         */
        __SetPageLocked(page);
        swp = make_writable_migration_entry(page_to_pfn(page));
        WARN_ON(!is_migration_entry(swp));
        WARN_ON(!is_writable_migration_entry(swp));

        swp = make_readable_migration_entry(swp_offset(swp));
        WARN_ON(!is_migration_entry(swp));
        WARN_ON(is_writable_migration_entry(swp));

        swp = make_readable_migration_entry(page_to_pfn(page));
        WARN_ON(!is_migration_entry(swp));
        WARN_ON(is_writable_migration_entry(swp));
        __ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
        struct page *page;
        pte_t pte;

        pr_debug("Validating HugeTLB basic\n");
        /*
         * Accessing the page associated with the pfn is safe here,
         * as it was previously derived from a real kernel symbol.
         */
        page = pfn_to_page(args->fixed_pmd_pfn);
        pte = mk_huge_pte(page, args->page_prot);

        WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
        WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
        WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
        pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

        WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
        pmd_t pmd;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PMD based THP\n");
        /*
         * pmd_trans_huge() and pmd_present() must return positive after
         * MMU invalidation with pmd_mkinvalid(). This behavior is an
         * optimization for transparent huge page. pmd_trans_huge() must
         * be true if pmd_page() returns a valid THP to avoid taking the
         * pmd_lock when others walk over non transhuge pmds (i.e. there
         * are no THP allocated). Especially when splitting a THP and
         * removing the present bit from the pmd, pmd_trans_huge() still
         * needs to return true. pmd_present() should be true whenever
         * pmd_trans_huge() returns true.
         */
        pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
        WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
        WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
        WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
        pud_t pud;

        if (!has_transparent_hugepage())
                return;

        pr_debug("Validating PUD based THP\n");
        pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
        WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

        /*
         * pud_mkinvalid() has been dropped for now. Re-enable these
         * tests when it comes back with a modified pud_present().
         *
         * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
         * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
         */
}
#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

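/*
 * Note (added for clarity): get_random_vaddr() below returns a random,
 * page-aligned address in the user range [FIRST_USER_ADDRESS, TASK_SIZE),
 * so each boot exercises the page table walk at a different spot in the
 * test mm's address space.
 */
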
static unsigned long __init get_random_vaddr(void)
{
        unsigned long random_vaddr, random_pages, total_user_pages;

        total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

        random_pages = get_random_long() % total_user_pages;
        random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

        return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
        struct page *page = NULL;

        /* Free (huge) page */
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
            has_transparent_hugepage() &&
            args->pud_pfn != ULONG_MAX) {
                if (args->is_contiguous_page) {
                        free_contig_range(args->pud_pfn,
                                          (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
                } else {
                        page = pfn_to_page(args->pud_pfn);
                        __free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
                }

                args->pud_pfn = ULONG_MAX;
                args->pmd_pfn = ULONG_MAX;
                args->pte_pfn = ULONG_MAX;
        }

        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            has_transparent_hugepage() &&
            args->pmd_pfn != ULONG_MAX) {
                if (args->is_contiguous_page) {
                        free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
                } else {
                        page = pfn_to_page(args->pmd_pfn);
                        __free_pages(page, HPAGE_PMD_ORDER);
                }

                args->pmd_pfn = ULONG_MAX;
                args->pte_pfn = ULONG_MAX;
        }

        if (args->pte_pfn != ULONG_MAX) {
                page = pfn_to_page(args->pte_pfn);
                __free_pages(page, 0);

                args->pte_pfn = ULONG_MAX;
        }

        /* Free page table entries */
        if (args->start_ptep) {
                pte_free(args->mm, args->start_ptep);
                mm_dec_nr_ptes(args->mm);
        }

        if (args->start_pmdp) {
                pmd_free(args->mm, args->start_pmdp);
                mm_dec_nr_pmds(args->mm);
        }

        if (args->start_pudp) {
                pud_free(args->mm, args->start_pudp);
                mm_dec_nr_puds(args->mm);
        }

        if (args->start_p4dp)
                p4d_free(args->mm, args->start_p4dp);

        /* Free vma and mm struct */
        if (args->vma)
                vm_area_free(args->vma);

        if (args->mm)
                mmdrop(args->mm);
}

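/*
 * Note (added for clarity): the buddy allocator only serves allocations
 * of order < MAX_ORDER. A PUD sized page (order HPAGE_PUD_SHIFT -
 * PAGE_SHIFT) can exceed that, in which case the helper below falls
 * back to alloc_contig_pages(), which is only available with
 * CONFIG_CONTIG_ALLOC. When neither path can satisfy the request, NULL
 * is returned and the tests needing that page are skipped.
 */
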
static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
        struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
        if (order >= MAX_ORDER) {
                page = alloc_contig_pages((1 << order), GFP_KERNEL,
                                          first_online_node, NULL);
                if (page) {
                        args->is_contiguous_page = true;
                        return page;
                }
        }
#endif

        if (order < MAX_ORDER)
                page = alloc_pages(GFP_KERNEL, order);

        return page;
}

static int __init init_args(struct pgtable_debug_args *args)
{
        struct page *page = NULL;
        phys_addr_t phys;
        int ret = 0;

        /*
         * Initialize the debugging data.
         *
         * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
         * will help create page table entries with PROT_NONE permission as
         * required for pxx_protnone_tests().
         */
        memset(args, 0, sizeof(*args));
        args->vaddr = get_random_vaddr();
        args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS);
        args->page_prot_none = vm_get_page_prot(VM_NONE);
        args->is_contiguous_page = false;
        args->pud_pfn = ULONG_MAX;
        args->pmd_pfn = ULONG_MAX;
        args->pte_pfn = ULONG_MAX;
        args->fixed_pgd_pfn = ULONG_MAX;
        args->fixed_p4d_pfn = ULONG_MAX;
        args->fixed_pud_pfn = ULONG_MAX;
        args->fixed_pmd_pfn = ULONG_MAX;
        args->fixed_pte_pfn = ULONG_MAX;

        /* Allocate mm and vma */
        args->mm = mm_alloc();
        if (!args->mm) {
                pr_err("Failed to allocate mm struct\n");
                ret = -ENOMEM;
                goto error;
        }

        args->vma = vm_area_alloc(args->mm);
        if (!args->vma) {
                pr_err("Failed to allocate vma\n");
                ret = -ENOMEM;
                goto error;
        }

        /*
         * Allocate page table entries. They will be modified in the tests.
         * Let's save the page table entries so that they can be released
         * when the tests are completed.
         */
        args->pgdp = pgd_offset(args->mm, args->vaddr);
        args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
        if (!args->p4dp) {
                pr_err("Failed to allocate p4d entries\n");
                ret = -ENOMEM;
                goto error;
        }
        args->start_p4dp = p4d_offset(args->pgdp, 0UL);
        WARN_ON(!args->start_p4dp);

        args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
        if (!args->pudp) {
                pr_err("Failed to allocate pud entries\n");
                ret = -ENOMEM;
                goto error;
        }
        args->start_pudp = pud_offset(args->p4dp, 0UL);
        WARN_ON(!args->start_pudp);

        args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
        if (!args->pmdp) {
                pr_err("Failed to allocate pmd entries\n");
                ret = -ENOMEM;
                goto error;
        }
        args->start_pmdp = pmd_offset(args->pudp, 0UL);
        WARN_ON(!args->start_pmdp);

        if (pte_alloc(args->mm, args->pmdp)) {
                pr_err("Failed to allocate pte entries\n");
                ret = -ENOMEM;
                goto error;
        }
        args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
        WARN_ON(!args->start_ptep);

        /*
         * The PFN for mapping at PTE level is determined from a standard
         * kernel text symbol. But pfns for higher page table levels are
         * derived by masking lower bits of this real pfn. These derived
         * pfns might not exist on the platform, but that does not really
         * matter as pfn_pxx() helpers will still create appropriate entries
         * for the test. This helps avoid large memory block allocations to
         * be used for mapping at higher page table levels in some of the
         * tests.
         */
        phys = __pa_symbol(&start_kernel);
        args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
        args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
        args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
        args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
        args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
        WARN_ON(!pfn_valid(args->fixed_pte_pfn));

        /*
         * Allocate (huge) pages because some of the tests need to access
         * the data in the pages. The corresponding tests will be skipped
         * if we fail to allocate (huge) pages.
         */
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
            has_transparent_hugepage()) {
                page = debug_vm_pgtable_alloc_huge_page(args,
                                HPAGE_PUD_SHIFT - PAGE_SHIFT);
                if (page) {
                        args->pud_pfn = page_to_pfn(page);
                        args->pmd_pfn = args->pud_pfn;
                        args->pte_pfn = args->pud_pfn;
                        return 0;
                }
        }

        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            has_transparent_hugepage()) {
                page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
                if (page) {
                        args->pmd_pfn = page_to_pfn(page);
                        args->pte_pfn = args->pmd_pfn;
                        return 0;
                }
        }

        page = alloc_pages(GFP_KERNEL, 0);
        if (page)
                args->pte_pfn = page_to_pfn(page);

        return 0;

error:
        destroy_args(args);
        return ret;
}

static int __init debug_vm_pgtable(void)
{
        struct pgtable_debug_args args;
        spinlock_t *ptl = NULL;
        int idx, ret;

        pr_info("Validating architecture page table helpers\n");
        ret = init_args(&args);
        if (ret)
                return ret;

        /*
         * Iterate over each possible vm_flags combination to make sure
         * that all the basic page table transformation validations just
         * hold true irrespective of the starting protection value for a
         * given page table entry.
         *
         * Protection based vm_flags combinations are always linear and
         * increasing, i.e. starting from VM_NONE and going up to
         * (VM_SHARED | READ | WRITE | EXEC).
         */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

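        /*
         * Illustration (added for clarity): with the generic flag values
         * VM_READ (0x1), VM_WRITE (0x2), VM_EXEC (0x4) and VM_SHARED (0x8),
         * this walks idx from 0x0 (VM_NONE) to 0xf, i.e. all 16 protection
         * combinations.
         */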
        for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
                pte_basic_tests(&args, idx);
                pmd_basic_tests(&args, idx);
                pud_basic_tests(&args, idx);
        }

        /*
         * Both P4D and PGD level tests are very basic and do not
         * involve creating page table entries from the protection
         * value and the given pfn. Hence just keep them out from
         * the above iteration for now to save some test execution
         * time.
         */
        p4d_basic_tests(&args);
        pgd_basic_tests(&args);

        pmd_leaf_tests(&args);
        pud_leaf_tests(&args);

        pte_special_tests(&args);
        pte_protnone_tests(&args);
        pmd_protnone_tests(&args);

        pte_devmap_tests(&args);
        pmd_devmap_tests(&args);
        pud_devmap_tests(&args);

        pte_soft_dirty_tests(&args);
        pmd_soft_dirty_tests(&args);
        pte_swap_soft_dirty_tests(&args);
        pmd_swap_soft_dirty_tests(&args);

        pte_swap_exclusive_tests(&args);

        pte_swap_tests(&args);
        pmd_swap_tests(&args);

        swap_migration_tests(&args);

        pmd_thp_tests(&args);
        pud_thp_tests(&args);

        hugetlb_basic_tests(&args);

        /*
         * Page table modifying tests. They need to hold
         * proper page table lock.
         */

        args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
        pte_clear_tests(&args);
        pte_advanced_tests(&args);
        pte_unmap_unlock(args.ptep, ptl);

        ptl = pmd_lock(args.mm, args.pmdp);
        pmd_clear_tests(&args);
        pmd_advanced_tests(&args);
        pmd_huge_tests(&args);
        pmd_populate_tests(&args);
        spin_unlock(ptl);

        ptl = pud_lock(args.mm, args.pudp);
        pud_clear_tests(&args);
        pud_advanced_tests(&args);
        pud_huge_tests(&args);
        pud_populate_tests(&args);
        spin_unlock(ptl);

        spin_lock(&(args.mm->page_table_lock));
        p4d_clear_tests(&args);
        pgd_clear_tests(&args);
        p4d_populate_tests(&args);
        pgd_populate_tests(&args);
        spin_unlock(&(args.mm->page_table_lock));

        destroy_args(&args);
        return 0;
}
late_initcall(debug_vm_pgtable);
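
/*
 * Usage note (added for clarity): this test is built only when
 * CONFIG_DEBUG_VM_PGTABLE is enabled. Being a late_initcall(), it runs
 * once during boot; any semantic violation shows up as a WARN_ON()
 * backtrace tagged with the "debug_vm_pgtable" pr_fmt prefix defined at
 * the top of this file.
 */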