// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif
#ifdef CONFIG_HIGHPTE
#define PGTABLE_HIGHMEM	__GFP_HIGHMEM
#else
#define PGTABLE_HIGHMEM	0
#endif

#ifndef CONFIG_PARAVIRT
static inline
void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        tlb_remove_page(tlb, table);
}
#endif
gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;

pgtable_t pte_alloc_one(struct mm_struct *mm)
{
        return __pte_alloc_one(mm, __userpte_alloc_gfp);
}

static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;

        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);
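/*
 * Example (assuming a 32-bit kernel built with CONFIG_HIGHPTE): booting with
 * "userpte=nohigh" clears __GFP_HIGHMEM from __userpte_alloc_gfp above, so
 * user PTE pages are then allocated from lowmem only.
 */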
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_pte_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        paravirt_tlb_remove_table(tlb, pte);
}
#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        struct page *page = virt_to_page(pmd);

        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        /*
         * NOTE! For PAE, any changes to the top page-directory-pointer-table
         * entries need a full cr3 reload to flush.
         */
#ifdef CONFIG_X86_PAE
        tlb->need_flush_all = 1;
#endif
        pgtable_pmd_page_dtor(page);
        paravirt_tlb_remove_table(tlb, page);
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        paravirt_tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
        paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
        paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
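/*
 * The ___*_free_tlb() helpers above hand page-table pages to the mmu_gather
 * (via paravirt_tlb_remove_table()), so the pages are only freed once the
 * corresponding TLB flush has happened.
 */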
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
#define MAX_UNSHARED_PTRS_PER_PGD			\
        max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
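/*
 * UNSHARED_PTRS_PER_PGD (above) is the number of pgd entries that need their
 * own per-mm pmd pages: with a shared kernel pmd only the user range below
 * KERNEL_PGD_BOUNDARY needs them, otherwise every entry does.
 * MAX_UNSHARED_PTRS_PER_PGD is the compile-time upper bound used to size the
 * on-stack pmd arrays in pgd_alloc().
 */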
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
        virt_to_page(pgd)->pt_mm = mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
        return page->pt_mm;
}
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (CONFIG_PGTABLE_LEVELS == 2 ||
            (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            CONFIG_PGTABLE_LEVELS >= 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD) {
                pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
        }
}
static void pgd_dtor(pgd_t *pgd)
{
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock(&pgd_lock);
        pgd_list_del(pgd);
        spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
#define MAX_PREALLOCATED_PMDS	MAX_UNSHARED_PTRS_PER_PGD

/*
 * We allocate separate PMDs for the kernel part of the user page-table
 * when PTI is enabled. We need them to map the per-process LDT into the
 * user-space page-table.
 */
#define PREALLOCATED_USER_PMDS	(boot_cpu_has(X86_FEATURE_PTI) ? \
                                        KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0
#define MAX_PREALLOCATED_PMDS	0
#define PREALLOCATED_USER_PMDS	0
#define MAX_PREALLOCATED_USER_PMDS 0
#endif	/* CONFIG_X86_PAE */
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
        int i;

        for (i = 0; i < count; i++)
                if (pmds[i]) {
                        pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
                        free_page((unsigned long)pmds[i]);
                        mm_dec_nr_pmds(mm);
                }
}
static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
        int i;
        bool failed = false;
        gfp_t gfp = GFP_PGTABLE_USER;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;

        for (i = 0; i < count; i++) {
                pmd_t *pmd = (pmd_t *)__get_free_page(gfp);

                if (!pmd)
                        failed = true;
                if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
                        free_page((unsigned long)pmd);
                        pmd = NULL;
                        failed = true;
                }
                if (pmd)
                        mm_inc_nr_pmds(mm);
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(mm, pmds, count);
                return -ENOMEM;
        }

        return 0;
}
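/*
 * The mm_inc_nr_pmds()/mm_dec_nr_pmds() calls above keep the mm's page-table
 * accounting (mm_pgtables_bytes()) in sync with the pages actually allocated
 * for the preallocated pmds.
 */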
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
        pgd_t pgd = *pgdp;

        if (pgd_val(pgd) != 0) {
                pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                pgd_clear(pgdp);

                paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                pmd_free(mm, pmd);
                mm_dec_nr_pmds(mm);
        }
}
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++)
                mop_up_one_pmd(mm, &pgdp[i]);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
        if (!boot_cpu_has(X86_FEATURE_PTI))
                return;

        pgdp = kernel_to_user_pgdp(pgdp);

        for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
                mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}
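/*
 * With PTI, only the kernel part of the user page-table has preallocated
 * pmds, which is why the loop above offsets into the user pgd by
 * KERNEL_PGD_BOUNDARY.
 */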
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        p4d_t *p4d;
        pud_t *pud;
        int i;

        p4d = p4d_offset(pgd, 0);
        pud = pud_offset(p4d, 0);

        for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}
#ifdef CONFIG_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
                                     pgd_t *k_pgd, pmd_t *pmds[])
{
        pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
        pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
        p4d_t *u_p4d;
        pud_t *u_pud;
        int i;

        u_p4d = p4d_offset(u_pgd, 0);
        u_pud = pud_offset(u_p4d, 0);

        s_pgd += KERNEL_PGD_BOUNDARY;
        u_pud += KERNEL_PGD_BOUNDARY;

        for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
                pmd_t *pmd = pmds[i];

                memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
                       sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, u_pud, pmd);
        }
}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
                                     pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif
/*
 * Xen paravirt assumes the pgd table occupies a single page, and the 64-bit
 * kernel makes the same assumption.
 *
 * A kernel with PAE paging that is not running as a Xen domain, however,
 * only needs 32 bytes for the pgd instead of a whole page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;
void __init pgtable_cache_init(void)
{
        /*
         * When a PAE kernel runs as a Xen domain it does not use a
         * shared kernel pmd, which requires a whole page for the pgd.
         */
        if (!SHARED_KERNEL_PMD)
                return;

        /*
         * When a PAE kernel is not running as a Xen domain it uses a
         * shared kernel pmd, which does not require a whole page for the
         * pgd. We can allocate just 32 bytes per pgd, so at boot time we
         * create a 32-byte slab cache for pgd allocations.
         */
        pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
                                      SLAB_PANIC, NULL);
}
static inline pgd_t *_pgd_alloc(void)
{
        /*
         * Without SHARED_KERNEL_PMD the PAE kernel is running as a Xen
         * domain, so allocate a whole page for the pgd.
         */
        if (!SHARED_KERNEL_PMD)
                return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
                                                 PGD_ALLOCATION_ORDER);

        /*
         * Otherwise the kernel is not running as a Xen domain and a
         * 32-byte slab object is enough for the pgd.
         */
        return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}

static inline void _pgd_free(pgd_t *pgd)
{
        if (!SHARED_KERNEL_PMD)
                free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
        else
                kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
        return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
                                         PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
        free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */
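/*
 * On non-PAE configurations PREALLOCATED_PMDS is 0 and the pmd arrays in
 * pgd_alloc() below are zero-sized; the "sizeof(...) != 0" checks there let
 * the compiler discard the preallocation paths entirely in that case.
 */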
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
        pmd_t *pmds[MAX_PREALLOCATED_PMDS];

        pgd = _pgd_alloc();
        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (sizeof(pmds) != 0 &&
                        preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
                goto out_free_pgd;

        if (sizeof(u_pmds) != 0 &&
                        preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
                goto out_free_pmds;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_user_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock(&pgd_lock);

        pgd_ctor(mm, pgd);
        if (sizeof(pmds) != 0)
                pgd_prepopulate_pmd(mm, pgd, pmds);

        if (sizeof(u_pmds) != 0)
                pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

        spin_unlock(&pgd_lock);

        return pgd;

out_free_user_pmds:
        if (sizeof(u_pmds) != 0)
                free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
        if (sizeof(pmds) != 0)
                free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
        _pgd_free(pgd);
out:
        return NULL;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        _pgd_free(pgd);
}
/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty)
                set_pte(ptep, entry);

        return changed;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        if (changed && dirty) {
                set_pmd(pmdp, entry);
                /*
                 * We had a write-protection fault here and changed the pmd
                 * to be more permissive. No need to flush the TLB for that,
                 * #PF is architecturally guaranteed to do that and in the
                 * worst-case we'll generate a spurious fault.
                 */
        }

        return changed;
}
int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pud_t *pudp, pud_t entry, int dirty)
{
        int changed = !pud_same(*pudp, entry);

        VM_BUG_ON(address & ~HPAGE_PUD_MASK);

        if (changed && dirty) {
                set_pud(pudp, entry);
                /*
                 * We had a write-protection fault here and changed the pud
                 * to be more permissive. No need to flush the TLB for that,
                 * #PF is architecturally guaranteed to do that and in the
                 * worst-case we'll generate a spurious fault.
                 */
        }

        return changed;
}
#endif
int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        return ret;
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pmd_t *pmdp)
{
        int ret = 0;

        if (pmd_young(*pmdp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pmdp);

        return ret;
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pudp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pud_t *pudp)
{
        int ret = 0;

        if (pud_young(*pudp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pudp);

        return ret;
}
#endif
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        /*
         * On x86 CPUs, clearing the accessed bit without a TLB flush
         * doesn't cause data corruption. [ It could cause incorrect
         * page aging and the (mistaken) reclaim of hot pages, but the
         * chance of that should be relatively low. ]
         *
         * So as a performance optimization don't flush the TLB when
         * clearing the accessed bit, it will eventually be flushed by
         * a context switch or a VM operation anyway. [ In the rare
         * event of it not getting flushed for a long time the delay
         * shouldn't really matter because there's no real memory
         * pressure for swapout to react to. ]
         */
        return ptep_test_and_clear_young(vma, address, ptep);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        return young;
}

pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
                         pmd_t *pmdp)
{
        /*
         * No flush is necessary. Once an invalid PTE is established, the PTE's
         * access and dirty bits cannot be updated.
         */
        return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
}
#endif
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
        printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
               -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
        /*
         * Ensure that the static initial page tables are covering the
         * fixmap completely.
         */
        BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
                     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
                       phys_addr_t phys, pgprot_t flags)
{
        /* Sanitize 'prot' against any unsupported bits: */
        pgprot_val(flags) &= __default_kernel_pte_mask;

        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- nothing to do
 */
void p4d_clear_huge(p4d_t *p4d)
{
}
#endif
/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK))
                return 0;

        /* Bail out if we are on a populated non-leaf entry: */
        if (pud_present(*pud) && !pud_huge(*pud))
                return 0;

        set_pte((pte_t *)pud, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

        return 1;
}
/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK)) {
                pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
                             __func__, addr, addr + PMD_SIZE);
                return 0;
        }

        /* Bail out if we are on a populated non-leaf entry: */
        if (pmd_present(*pmd) && !pmd_huge(*pmd))
                return 0;

        set_pte((pte_t *)pmd, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

        return 1;
}
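/*
 * pud_set_huge() and pmd_set_huge() above are what the generic
 * CONFIG_HAVE_ARCH_HUGE_VMAP code (huge vmalloc/ioremap mappings) calls
 * when it tries to install a large leaf entry instead of a full lower-level
 * table.
 */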
/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
        if (pud_large(*pud)) {
                pud_clear(pud);
                return 1;
        }

        return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
        if (pmd_large(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }

        return 0;
}
#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        pmd_t *pmd, *pmd_sv;
        pte_t *pte;
        int i;

        pmd = pud_pgtable(*pud);
        pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
        if (!pmd_sv)
                return 0;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd_sv[i] = pmd[i];
                if (!pmd_none(pmd[i]))
                        pmd_clear(&pmd[i]);
        }

        pud_clear(pud);

        /* INVLPG to clear all paging-structure caches */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(pmd_sv[i])) {
                        pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
                        free_page((unsigned long)pte);
                }
        }

        free_page((unsigned long)pmd_sv);

        pgtable_pmd_page_dtor(virt_to_page(pmd));
        free_page((unsigned long)pmd);

        return 1;
}
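/*
 * Note on the pmd_sv copy above: the pmd entries are snapshotted before
 * being cleared so that, once the pud entry is gone and the TLB flushed,
 * the underlying pte pages can still be found and freed.
 */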
/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;

        pte = (pte_t *)pmd_page_vaddr(*pmd);
        pmd_clear(pmd);

        /* INVLPG to clear all paging-structure caches */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

        free_page((unsigned long)pte);

        return 1;
}
#else /* !CONFIG_X86_64 */

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        return pmd_none(*pmd);
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */