/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 *  -"/proc/meminfo | grep PageTables" kept on increasing:
 *   the recently added pgtable dtor was not getting called.
 *
 *  -Variable pg-sz means that Page Tables could be variable sized themselves,
 *   so calculate it based on the addr traversal split [pgd-bits:pte-bits:xxx]
 *  -Page Table size capped to max 1 page to save memory - hence verified.
 *  -Since these deal with constants, gcc compile-time optimizes them.
 *
 *  -Added pgtable ctor/dtor used for pgtable mem accounting
 *
 *  -Switched pgtable_t from being struct page * to unsigned long
 *      =Needed so that the Page Table allocator (pte_alloc_one) is not forced
 *       to deal with struct page. That way, in future, we can make it allocate
 *       multiple PG Tbls in one Page Frame
 *      =sweet side effect is avoiding calls to ugly page_address() from the
 *       pg-tlb allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */
#ifndef _ASM_ARC_PGALLOC_H
#define _ASM_ARC_PGALLOC_H

#include <linux/mm.h>
#include <linux/log2.h>

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_set(pmd, pte);
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
{
	pmd_set(pmd, (pte_t *) ptep);
}

static inline int __get_order_pgd(void)
{
	return get_order(PTRS_PER_PGD * sizeof(pgd_t));
}

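/*
 * A task's pgd is set up in three slices: the user (and user/kernel gutter)
 * entries are zeroed, the kernel vmalloc entries are copied from the master
 * pgd (swapper_pg_dir), and whatever lies past the vmalloc region is zeroed.
 */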
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int num, num2;
	pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());

	if (ret) {
		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
		memzero(ret, num * sizeof(pgd_t));

		num2 = VMALLOC_SIZE / PGDIR_SIZE;
		memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));

		memzero(ret + num + num2,
			(PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
	}

	return ret;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, __get_order_pgd());
}

/*
 * With software-only page-tables, the addr-split for traversal is tweakable
 * and that directly governs how big the tables would be at each level.
 * Further, the MMU page size is configurable.
 * Thus we need to programmatically assert the size constraint.
 * All of this is const math, allowing gcc to do constant folding/propagation.
 */
static inline int __get_order_pte(void)
{
	return get_order(PTRS_PER_PTE * sizeof(pte_t));
}

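/*
 * Worked example of the const math above (a hypothetical config, purely for
 * illustration): with an 8 KB MMU page and a [pgd:pte:off] addr split of
 * [11:8:13] bits, PTRS_PER_PTE = 1 << 8 = 256 and sizeof(pte_t) = 4, so a
 * pte table occupies 1 KB and __get_order_pte() folds to get_order(1024) = 0,
 * i.e. each page table fits in a single page frame.
 */
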
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					  __get_order_pte());
}

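/*
 * User page tables additionally go through pgtable_page_ctor()/dtor() so the
 * core mm can account them (the PageTables line in /proc/meminfo) and set up
 * the split page-table lock where configured.
 */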
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm)
{
	pgtable_t pte_pg;
	struct page *page;

	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
	if (!pte_pg)
		return 0;
	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
	page = virt_to_page(pte_pg);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return 0;
	}

	return pte_pg;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, __get_order_pte()); /* takes kernel vaddr */
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
	pgtable_page_dtor(virt_to_page(ptep));
	free_pages((unsigned long)ptep, __get_order_pte());
}

#define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)

#define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))

#endif /* _ASM_ARC_PGALLOC_H */