Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _X86_64_PGALLOC_H |
2 | #define _X86_64_PGALLOC_H | |
3 | ||
4 | #include <asm/fixmap.h> | |
5 | #include <asm/pda.h> | |
6 | #include <linux/threads.h> | |
7 | #include <linux/mm.h> | |
8 | ||
/*
 * Link page-table levels together: each helper writes one table entry
 * pointing at the physical address (__pa) of the next-lower level page,
 * tagged with the _PAGE_TABLE permission bits.  The mm argument is not
 * used by these macros.
 */
#define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
#define pgd_populate(mm, pgd, pud) \
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
15 | ||
16 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte) | |
17 | { | |
18 | set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT))); | |
19 | } | |
20 | ||
9c0aa0f9 | 21 | static inline pmd_t *get_pmd(void) |
1da177e4 LT |
22 | { |
23 | return (pmd_t *)get_zeroed_page(GFP_KERNEL); | |
24 | } | |
25 | ||
9c0aa0f9 | 26 | static inline void pmd_free(pmd_t *pmd) |
1da177e4 LT |
27 | { |
28 | BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | |
29 | free_page((unsigned long)pmd); | |
30 | } | |
31 | ||
32 | static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr) | |
33 | { | |
34 | return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | |
35 | } | |
36 | ||
37 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | |
38 | { | |
39 | return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | |
40 | } | |
41 | ||
42 | static inline void pud_free (pud_t *pud) | |
43 | { | |
44 | BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); | |
45 | free_page((unsigned long)pud); | |
46 | } | |
47 | ||
/*
 * Allocate a top-level page table for a new mm.
 * The user half (entries below pgd_index(__PAGE_OFFSET)) is cleared;
 * the kernel half is copied from init_level4_pgt so kernel mappings
 * are shared by every process.  Returns NULL on allocation failure.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pgd)
		return NULL;
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);	/* index of first kernel-space slot */
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}
66 | ||
67 | static inline void pgd_free(pgd_t *pgd) | |
68 | { | |
69 | BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); | |
70 | free_page((unsigned long)pgd); | |
71 | } | |
72 | ||
73 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | |
74 | { | |
75 | return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | |
76 | } | |
77 | ||
78 | static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | |
79 | { | |
80 | void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | |
81 | if (!p) | |
82 | return NULL; | |
83 | return virt_to_page(p); | |
84 | } | |
85 | ||
86 | /* Should really implement gc for free page table pages. This could be | |
87 | done with a reference count in struct page. */ | |
88 | ||
9c0aa0f9 | 89 | static inline void pte_free_kernel(pte_t *pte) |
1da177e4 LT |
90 | { |
91 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | |
92 | free_page((unsigned long)pte); | |
93 | } | |
94 | ||
/* Release a user pte page previously obtained from pte_alloc_one(). */
static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}
99 | ||
/*
 * Queue page-table pages on the mmu_gather so they are freed only
 * after the TLB has been flushed.  pte takes a struct page directly;
 * pmd/pud take a virtual address and convert it.
 */
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))

#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
104 | ||
105 | #endif /* _X86_64_PGALLOC_H */ |