#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

1d262d3a JF |
8 | #ifdef CONFIG_PARAVIRT |
9 | #include <asm/paravirt.h> | |
10 | #else | |
11 | #define paravirt_alloc_pt(mm, pfn) do { } while (0) | |
12 | #define paravirt_alloc_pd(mm, pfn) do { } while (0) | |
13 | #define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0) | |
14 | #define paravirt_release_pt(pfn) do { } while (0) | |
15 | #define paravirt_release_pd(pfn) do { } while (0) | |
16 | #endif | |
17 | ||
4f76cd38 JF |
18 | /* |
19 | * Allocate and free page tables. | |
20 | */ | |
21 | extern pgd_t *pgd_alloc(struct mm_struct *); | |
22 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | |
23 | ||
24 | extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); | |
25 | extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long); | |
26 | ||
397f687a JF |
27 | /* Should really implement gc for free page table pages. This could be |
28 | done with a reference count in struct page. */ | |
29 | ||
30 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | |
31 | { | |
32 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | |
33 | free_page((unsigned long)pte); | |
34 | } | |
35 | ||
36 | static inline void pte_free(struct mm_struct *mm, struct page *pte) | |
37 | { | |
38 | __free_page(pte); | |
39 | } | |
40 | ||
41 | extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte); | |
42 | ||
170fdff7 JF |
43 | static inline void pmd_populate_kernel(struct mm_struct *mm, |
44 | pmd_t *pmd, pte_t *pte) | |
45 | { | |
46 | paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); | |
47 | set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); | |
48 | } | |
49 | ||
50 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | |
51 | struct page *pte) | |
52 | { | |
53 | unsigned long pfn = page_to_pfn(pte); | |
54 | ||
55 | paravirt_alloc_pt(mm, pfn); | |
56 | set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE)); | |
57 | } | |
58 | ||
59 | #define pmd_pgtable(pmd) pmd_page(pmd) | |
60 | ||
61 | #if PAGETABLE_LEVELS > 2 | |
62 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | |
63 | { | |
64 | return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | |
65 | } | |
66 | ||
67 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |
68 | { | |
69 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | |
70 | free_page((unsigned long)pmd); | |
71 | } | |
72 | ||
73 | extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd); | |
170fdff7 | 74 | |
5a5f8f42 JF |
75 | #ifdef CONFIG_X86_PAE |
76 | extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); | |
77 | #else /* !CONFIG_X86_PAE */ | |
78 | static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |
79 | { | |
80 | paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT); | |
81 | set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd))); | |
82 | } | |
83 | #endif /* CONFIG_X86_PAE */ | |
84 | ||
85 | #if PAGETABLE_LEVELS > 3 | |
86 | static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) | |
87 | { | |
88 | set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud))); | |
89 | } | |
90 | ||
91 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | |
92 | { | |
93 | return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | |
94 | } | |
95 | ||
96 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) | |
97 | { | |
98 | BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); | |
99 | free_page((unsigned long)pud); | |
100 | } | |
101 | ||
102 | extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud); | |
103 | #endif /* PAGETABLE_LEVELS > 3 */ | |
104 | #endif /* PAGETABLE_LEVELS > 2 */ | |

#endif /* _ASM_X86_PGALLOC_H */