Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle | |
7 | * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. | |
8 | */ | |
9 | #ifndef _ASM_PGALLOC_H | |
10 | #define _ASM_PGALLOC_H | |
11 | ||
12 | #include <linux/config.h> | |
13 | #include <linux/highmem.h> | |
14 | #include <linux/mm.h> | |
15 | ||
16 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, | |
17 | pte_t *pte) | |
18 | { | |
19 | set_pmd(pmd, __pmd((unsigned long)pte)); | |
20 | } | |
21 | ||
22 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | |
23 | struct page *pte) | |
24 | { | |
25 | set_pmd(pmd, __pmd((unsigned long)page_address(pte))); | |
26 | } | |
27 | ||
28 | /* | |
29 | * Initialize a new pgd / pmd table with invalid pointers. | |
30 | */ | |
31 | extern void pgd_init(unsigned long page); | |
32 | extern void pmd_init(unsigned long page, unsigned long pagetable); | |
33 | ||
34 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |
35 | { | |
36 | pgd_t *ret, *init; | |
37 | ||
38 | ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); | |
39 | if (ret) { | |
40 | init = pgd_offset(&init_mm, 0); | |
41 | pgd_init((unsigned long)ret); | |
42 | memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, | |
43 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | |
44 | } | |
45 | ||
46 | return ret; | |
47 | } | |
48 | ||
49 | static inline void pgd_free(pgd_t *pgd) | |
50 | { | |
51 | free_pages((unsigned long)pgd, PGD_ORDER); | |
52 | } | |
53 | ||
54 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |
55 | unsigned long address) | |
56 | { | |
57 | pte_t *pte; | |
58 | ||
59 | pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER); | |
60 | ||
61 | return pte; | |
62 | } | |
63 | ||
64 | static inline struct page *pte_alloc_one(struct mm_struct *mm, | |
65 | unsigned long address) | |
66 | { | |
67 | struct page *pte; | |
68 | ||
69 | pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); | |
70 | if (pte) | |
71 | clear_highpage(pte); | |
72 | ||
73 | return pte; | |
74 | } | |
75 | ||
76 | static inline void pte_free_kernel(pte_t *pte) | |
77 | { | |
78 | free_pages((unsigned long)pte, PTE_ORDER); | |
79 | } | |
80 | ||
81 | static inline void pte_free(struct page *pte) | |
82 | { | |
83 | __free_pages(pte, PTE_ORDER); | |
84 | } | |
85 | ||
/* Defer freeing of a pte page to the mmu_gather TLB batching code. */
#define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))
87 | ||
#ifdef CONFIG_MIPS32

/*
 * 32-bit MIPS uses a two-level page table, so there is never a separately
 * allocated pmd to install: reaching pgd_populate() is a bug.
 */
#define pgd_populate(mm, pmd, pte)	BUG()

/*
 * The 1-entry pmd is folded into the pgd and carries no extra memory, so
 * allocating and freeing one is trivial.  The bogus non-NULL return value
 * satisfies the generic code without ever being dereferenced.
 */
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define __pmd_free_tlb(tlb, x)		do { } while (0)
#endif
99 | ||
#ifdef CONFIG_MIPS64

/* A pgd entry holds the address of the next-level (pmd) table. */
#define pgd_populate(mm, pgd, pmd)	set_pgd(pgd, __pgd(pmd))

/*
 * Allocate a middle-level table and point every slot at the shared
 * invalid_pte_table so unmapped regions fault cleanly.  Returns NULL on
 * allocation failure.
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *new_pmd;

	new_pmd = (pmd_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT,
					     PMD_ORDER);
	if (new_pmd != NULL)
		pmd_init((unsigned long) new_pmd,
			 (unsigned long) invalid_pte_table);
	return new_pmd;
}

/* Free a pmd table allocated by pmd_alloc_one(). */
static inline void pmd_free(pmd_t *pmd)
{
	free_pages((unsigned long) pmd, PMD_ORDER);
}

#define __pmd_free_tlb(tlb, x)	pmd_free(x)

#endif
122 | ||
/* MIPS keeps no per-CPU page table cache, so there is nothing to trim. */
#define check_pgt_cache()	do { } while (0)
124 | ||
125 | #endif /* _ASM_PGALLOC_H */ |