#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;
/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})
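
/*
 * Illustrative sketch only (the real code lives in arch/powerpc/mm, not in
 * this header): because every page table is aligned to more than
 * MAX_PGTABLE_INDEX_SIZE bytes, the index size can be stashed in the low
 * bits of the table pointer when the table is queued for freeing, and the
 * same constant recovers both pieces later, roughly:
 *
 *	unsigned long pgf = (unsigned long)table;
 *	pgf |= shift;					 // encode index size
 *	...
 *	void *tbl = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);	// table address
 *	unsigned int idx = pgf & MAX_PGTABLE_INDEX_SIZE;	// index size back
 *
 * This is why the comment above insists on the alignment and on the value
 * being one less than a power of two.
 */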
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
#ifndef CONFIG_PPC_64K_PAGES

#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, (unsigned long)PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, (unsigned long)page_address(pte_page));
}

#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = pte_alloc_one_kernel(mm, address);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}

extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_free_tlb(tlb, page_address(table), 0);
}
#else /* if CONFIG_PPC_64K_PAGES */

extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);

#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, (unsigned long)pte_page);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);
}
#endif /* CONFIG_PPC_64K_PAGES */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}
#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr)		      \
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)

#endif /* CONFIG_PPC_64K_PAGES */

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */