#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
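
/*
 * Illustrative sketch of how the mask gets used (the variable names
 * below are ours and the real logic lives out of line, so treat this
 * as a reading aid rather than the implementation): since every table
 * is aligned to at least 16 bytes, the index size fits in the low
 * bits of the table pointer and can be recovered with the mask:
 *
 *	unsigned long pgf = (unsigned long)table;
 *	pgf |= shift;				(shift is at most 0xf)
 *	...
 *	shift = pgf & MAX_PGTABLE_INDEX_SIZE;
 *	table = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
 */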

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})

extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif
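
/*
 * Rough sketch of the deferred free path, assuming the out-of-line
 * definitions under arch/powerpc/mm (a description, not a contract):
 * pgtable_free_tlb() tags the table pointer with its index size, as
 * sketched above, and queues it on the mmu_gather; once the TLB has
 * been flushed, __tlb_remove_table() strips the tag and returns the
 * memory to the matching kmem_cache or page-fragment pool.
 */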

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;

	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *)page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}
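
/*
 * Why order 4 above: assuming RADIX_PGD_INDEX_SIZE is 13, as in
 * contemporaneous kernels, the radix PGD is 2^13 entries of 8 bytes,
 * i.e. 64K.  With a 4K base page size that is sixteen pages, hence
 * the order-4 alloc_pages()/free_pages() pair; with 64K base pages
 * the same table fits in one page, hence __get_free_page()/free_page().
 */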

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * With hugetlb, we don't clear the second half of the page table.
	 * If we share the same slab cache with the pmd or pud level table,
	 * we need to make sure we zero out the full table on alloc.
	 * With 4K page size we don't store slots in the second half, hence
	 * we don't need to do this for 4K.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	((H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX) ||		     \
	 (H_PGD_INDEX_SIZE == H_PMD_CACHE_INDEX))
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries, so go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	/*
	 * PMD tables are carved from page fragments; allocation must pair
	 * with pmd_fragment_free(), including the deferred free done by
	 * pgtable_free_tlb() with PMD_INDEX.
	 */
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries, so go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	/* Final argument: 1 allocates a kernel PTE fragment. */
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	/* Final argument: 0 allocates a user PTE fragment. */
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries, so go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */