1 #ifndef _ASM_POWERPC_PGALLOC_H
2 #define _ASM_POWERPC_PGALLOC_H
#ifdef CONFIG_PPC_BOOK3E
/* Book3E-family MMUs need freed page tables flushed from the TLB
 * hierarchy; the real implementation lives out of line. */
extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address);
#else /* CONFIG_PPC_BOOK3E */
/* On non-Book3E MMUs no page-table flushing is required, so this is a
 * no-op stub kept inline so call sites compile away. */
static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
				     unsigned long address)
{
}
#endif /* !CONFIG_PPC_BOOK3E */
16 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
18 free_page((unsigned long)pte);
21 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
23 pgtable_page_dtor(ptepage);
/* Encodes a page-table pointer plus a cache index in a single word;
 * see pgtable_free_cache() below for the packing scheme. */
typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;
/* This needs to be big enough to allow for MMU_PAGE_COUNT + 2 to be stored
 * and small enough to fit in the low bits of any naturally aligned page
 * table cache entry. Arbitrarily set to 0x1f, that should give us some
 * room to grow.
 */
#define PGF_CACHENUM_MASK	0x1f
38 static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
41 BUG_ON(cachenum > PGF_CACHENUM_MASK);
43 return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
47 #include <asm/pgalloc-64.h>
49 #include <asm/pgalloc-32.h>
53 extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
54 extern void pte_free_finish(void);
55 #else /* CONFIG_SMP */
56 static inline void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
60 static inline void pte_free_finish(void) { }
61 #endif /* !CONFIG_SMP */
63 static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
64 unsigned long address)
66 pgtable_free_t pgf = pgtable_free_cache(page_address(ptepage),
69 tlb_flush_pgtable(tlb, address);
70 pgtable_page_dtor(ptepage);
71 pgtable_free_tlb(tlb, pgf);
74 #endif /* __KERNEL__ */
75 #endif /* _ASM_POWERPC_PGALLOC_H */