Commit | Line | Data |
---|---|---|
1802d0be | 1 | // SPDX-License-Identifier: GPL-2.0-only |
084bd298 SC |
2 | /* |
3 | * arch/arm64/mm/hugetlbpage.c | |
4 | * | |
5 | * Copyright (C) 2013 Linaro Ltd. | |
6 | * | |
7 | * Based on arch/x86/mm/hugetlbpage.c. | |
084bd298 SC |
8 | */ |
9 | ||
10 | #include <linux/init.h> | |
11 | #include <linux/fs.h> | |
12 | #include <linux/mm.h> | |
13 | #include <linux/hugetlb.h> | |
14 | #include <linux/pagemap.h> | |
15 | #include <linux/err.h> | |
16 | #include <linux/sysctl.h> | |
17 | #include <asm/mman.h> | |
18 | #include <asm/tlb.h> | |
19 | #include <asm/tlbflush.h> | |
084bd298 | 20 | |
abb7962a AK |
21 | /* |
22 | * HugeTLB Support Matrix | |
23 | * | |
24 | * --------------------------------------------------- | |
25 | * | Page Size | CONT PTE | PMD | CONT PMD | PUD | | |
26 | * --------------------------------------------------- | |
27 | * | 4K | 64K | 2M | 32M | 1G | | |
28 | * | 16K | 2M | 32M | 1G | | | |
29 | * | 64K | 2M | 512M | 16G | | | |
30 | * --------------------------------------------------- | |
31 | */ | |
32 | ||
33 | /* | |
34 | * Reserve CMA areas for the largest supported gigantic | |
35 | * huge page when requested. Any other smaller gigantic | |
36 | * huge pages could still be served from those areas. | |
37 | */ | |
/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	/*
	 * Pick the largest gigantic size this configuration supports:
	 * a PUD block when PUD-level sections exist, otherwise a
	 * contiguous-PMD range.
	 */
	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which could not be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	WARN_ON(order <= MAX_ORDER);
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
58 | ||
a8a733b2 | 59 | static bool __hugetlb_valid_size(unsigned long size) |
5480280d | 60 | { |
a8a733b2 | 61 | switch (size) { |
f8b46c4b | 62 | #ifndef __PAGETABLE_PMD_FOLDED |
5480280d | 63 | case PUD_SIZE: |
f8b46c4b | 64 | return pud_sect_supported(); |
5480280d | 65 | #endif |
5480280d | 66 | case CONT_PMD_SIZE: |
a8a733b2 | 67 | case PMD_SIZE: |
5480280d AK |
68 | case CONT_PTE_SIZE: |
69 | return true; | |
70 | } | |
a8a733b2 | 71 | |
5480280d AK |
72 | return false; |
73 | } | |
a8a733b2 AK |
74 | |
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
/*
 * Hugepage migration is supported for any page size this
 * configuration recognizes; warn on anything else.
 */
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (__hugetlb_valid_size(pagesize))
		return true;

	pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	return false;
}
#endif
88 | ||
084bd298 SC |
89 | int pmd_huge(pmd_t pmd) |
90 | { | |
fd28f5d4 | 91 | return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); |
084bd298 SC |
92 | } |
93 | ||
94 | int pud_huge(pud_t pud) | |
95 | { | |
4797ec2d | 96 | #ifndef __PAGETABLE_PMD_FOLDED |
fd28f5d4 | 97 | return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT); |
4797ec2d MS |
98 | #else |
99 | return 0; | |
100 | #endif | |
084bd298 SC |
101 | } |
102 | ||
/*
 * Given a pte pointer known to map a contiguous huge page at @addr,
 * work out which level it lives at by re-walking the page table:
 * if @ptep is actually the pmd slot, this is a contiguous-PMD range,
 * otherwise a contiguous-PTE range.  Returns the number of entries
 * in the set and stores the per-entry size in *pgsize.
 */
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	/* Pointer identity tells us which level @ptep belongs to. */
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}
121 | ||
c3e4ed5c PA |
/*
 * For a hugetlb mapping of @size, return how many page-table entries
 * make it up and store the size covered by each entry in *pgsize.
 * Returns 0 for an unrecognized size (or PUD_SIZE when PUD sections
 * are not supported).
 */
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		/* A single PUD block entry, only if the CPU supports it. */
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		/* CONT_PMDS pmd entries, each covering PMD_SIZE. */
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		/* CONT_PTES pte entries, each covering PAGE_SIZE. */
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}
150 | ||
bc5dfb4f BW |
/*
 * Read a huge pte.  For a present contiguous mapping, hardware AF/DBM
 * may have set the dirty/young bits on any entry of the set, so fold
 * those bits from every entry into the value returned for the first.
 */
pte_t huge_ptep_get(pte_t *ptep)
{
	int ncontig, i;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	/* Non-present or non-contiguous entries need no folding. */
	if (!pte_present(orig_pte) || !pte_cont(orig_pte))
		return orig_pte;

	ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
	for (i = 0; i < ncontig; i++, ptep++) {
		pte_t pte = ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}
172 | ||
d8bdcff2 SC |
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step: it clears all @ncontig entries
 * and returns the first entry with the dirty/young bits of the whole
 * set folded in.  No TLB flush is performed here.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	pte_t orig_pte = ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}
206 | ||
41098230 WD |
207 | static pte_t get_clear_contig_flush(struct mm_struct *mm, |
208 | unsigned long addr, | |
209 | pte_t *ptep, | |
210 | unsigned long pgsize, | |
211 | unsigned long ncontig) | |
212 | { | |
213 | pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig); | |
214 | struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); | |
215 | ||
216 | flush_tlb_range(&vma, addr, addr + (pgsize * ncontig)); | |
217 | return orig_pte; | |
218 | } | |
219 | ||
d8bdcff2 SC |
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			     unsigned long addr,
			     pte_t *ptep,
			     unsigned long pgsize,
			     unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	/* Clear every entry of the set, then invalidate the whole range. */
	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		ptep_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}
243 | ||
/*
 * Install a huge mapping of size @sz at @addr.  Non-present entries
 * (e.g. swap/migration) and non-contiguous sizes are written directly;
 * a contiguous set is first broken and flushed (Break-Before-Make),
 * then each member entry is written with an advancing pfn.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, unsigned long sz)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	ncontig = num_contig_ptes(sz, &pgsize);

	/* Non-present entries carry no pfn; just store them verbatim. */
	if (!pte_present(pte)) {
		for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
			set_pte_at(mm, addr, ptep, pte);
		return;
	}

	/* A single block/page entry needs no BBM sequence. */
	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;	/* pfn stride per entry */
	hugeprot = pte_pgprot(pte);

	/* Break-Before-Make: clear and flush the old set first. */
	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
275 | ||
/*
 * Allocate the page-table entries needed to map a huge page of size
 * @sz at @addr and return a pointer to the first (or only) entry.
 * Returns NULL if an intermediate table allocation fails.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		/* PUD block mapping: the pud slot itself is the "pte". */
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		ptep = pte_alloc_huge(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		/* Share the pmd page with other processes where possible. */
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}
313 | ||
7868a208 PA |
/*
 * Look up the page-table entry mapping the huge page of size @sz at
 * @addr, without allocating.  Returns NULL if no entry of the expected
 * level exists.  Swap/migration entries at the pud/pmd level are also
 * returned so callers can inspect them.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	/* For a PUD-sized page, even a none entry is the right slot. */
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	/* Contiguous sets are addressed by their first entry. */
	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}
355 | ||
1bcdb769 BW |
356 | unsigned long hugetlb_mask_last_page(struct hstate *h) |
357 | { | |
358 | unsigned long hp_size = huge_page_size(h); | |
359 | ||
360 | switch (hp_size) { | |
361 | #ifndef __PAGETABLE_PMD_FOLDED | |
362 | case PUD_SIZE: | |
363 | return PGDIR_SIZE - PUD_SIZE; | |
364 | #endif | |
365 | case CONT_PMD_SIZE: | |
366 | return PUD_SIZE - CONT_PMD_SIZE; | |
367 | case PMD_SIZE: | |
368 | return PUD_SIZE - PMD_SIZE; | |
369 | case CONT_PTE_SIZE: | |
370 | return PMD_SIZE - CONT_PTE_SIZE; | |
371 | default: | |
372 | break; | |
373 | } | |
374 | ||
375 | return 0UL; | |
376 | } | |
377 | ||
79c1c594 | 378 | pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) |
66b3923a | 379 | { |
79c1c594 | 380 | size_t pagesize = 1UL << shift; |
66b3923a | 381 | |
16785bd7 | 382 | entry = pte_mkhuge(entry); |
66b3923a DW |
383 | if (pagesize == CONT_PTE_SIZE) { |
384 | entry = pte_mkcont(entry); | |
385 | } else if (pagesize == CONT_PMD_SIZE) { | |
386 | entry = pmd_pte(pmd_mkcont(pte_pmd(entry))); | |
387 | } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { | |
388 | pr_warn("%s: unrecognized huge page size 0x%lx\n", | |
389 | __func__, pagesize); | |
390 | } | |
391 | return entry; | |
392 | } | |
393 | ||
c3e4ed5c PA |
394 | void huge_pte_clear(struct mm_struct *mm, unsigned long addr, |
395 | pte_t *ptep, unsigned long sz) | |
396 | { | |
397 | int i, ncontig; | |
398 | size_t pgsize; | |
399 | ||
400 | ncontig = num_contig_ptes(sz, &pgsize); | |
401 | ||
402 | for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) | |
403 | pte_clear(mm, addr, ptep); | |
404 | } | |
405 | ||
66b3923a DW |
/*
 * Clear the huge mapping at @addr and return the old pte.  For a
 * contiguous set the whole set is cleared and the dirty/young bits of
 * every member are folded into the returned value.
 */
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
420 | ||
031e6e6b SC |
/*
 * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
 * and write permission.
 *
 * For a contiguous huge pte range we need to check whether or not write
 * permission has to change only on the first pte in the set. Then for
 * all the contiguous ptes we need to check whether or not there is a
 * discrepancy between dirty or young.
 *
 * Returns 1 if any entry needs updating, 0 otherwise.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	/* Write permission only needs checking against the first entry. */
	if (pte_write(pte) != pte_write(ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}
449 | ||
66b3923a DW |
/*
 * Update access flags (dirty/young) and write permission of a huge
 * mapping.  Returns 1 if the entries were changed, 0 if nothing needed
 * updating.  Contiguous sets go through Break-Before-Make and the
 * hardware-set dirty/young bits of the old set are preserved.
 */
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;	/* pfn stride per entry */

	/* Nothing to do if no flag differs across the set. */
	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	/* Break step: clear + flush, folding dirty/young of the old set. */
	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}
485 | ||
/*
 * Write-protect the huge mapping at @addr.  A contiguous set is broken
 * and flushed first (Break-Before-Make), then rewritten with the
 * write-protected prot, preserving hardware-set dirty/young bits.
 */
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;	/* pfn stride per entry */

	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
512 | ||
ae075629 BW |
/*
 * Clear the huge mapping at @addr, flush the TLB, and return the old
 * pte (with dirty/young folded across a contiguous set).
 */
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep)))
		return ptep_clear_flush(vma, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}
526 | ||
a21b0b78 AP |
/* Register every hugetlb page size this configuration supports. */
static int __init hugetlbpage_init(void)
{
	/* PUD-sized pages only when the CPU supports PUD sections. */
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);
539 | ||
/* Arch hook: report whether @size is a supported hugetlb page size. */
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}
5db568e7 AK |
544 | |
/*
 * Start a prot-change transaction on a huge mapping: clear the entry
 * and return the old pte for huge_ptep_modify_prot_commit().  On CPUs
 * affected by erratum 2645198, executable user mappings must also be
 * TLB-flushed at this point.
 */
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space mappings
		 * when the permission changes from executable to non-executable
		 * in cases where cpu is affected with errata #2645198.
		 */
		if (pte_user_exec(READ_ONCE(*ptep)))
			return huge_ptep_clear_flush(vma, addr, ptep);
	}
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
559 | ||
/*
 * Commit a prot-change transaction started by
 * huge_ptep_modify_prot_start() by installing the new pte.
 */
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
				  pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}