powerpc/mm/book3s/radix: Add mapping statistics
arch/powerpc/include/asm/book3s/64/pgalloc.h
#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation. For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer. In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value. This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
		BUG_ON(!(shift));			\
		pgtable_cache[(shift) - 1];		\
	})

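/*
 * Worked example (illustrative): a table with an index_size of 9
 * holds 2^9 = 512 pointers, so it is drawn from a kmem_cache of
 * 512 * sizeof(void *) = 4096 byte objects; a 4096 byte aligned
 * pointer has 12 low zero bits, comfortably enough to carry the
 * 4 bit MAX_PGTABLE_INDEX_SIZE mask.
 */
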
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif

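/*
 * Note that both configurations below allocate a 64K radix PGD:
 * either one 64K page, or an order-4 block of sixteen 4K pages.
 */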
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *)page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Don't scan the PGD for pointers: it contains references to
	 * PUDs, but those references are not full pointers and so
	 * can't be recognised by kmemleak.
	 */
	kmemleak_no_scan(pgd);

	/*
	 * With hugetlb, we don't clear the second half of the page table.
	 * If we share the same slab cache with the pmd or pud level table,
	 * we need to make sure we zero out the full table on alloc.
	 * With 4K pages we don't store a slot in the second half, hence
	 * we don't need to do this for 4K.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Tell kmemleak to ignore the PUD: don't scan it for pointers
	 * and don't consider it a leak. PUDs are typically only
	 * referred to by their PGD, but kmemleak is not able to
	 * recognise those as pointers, leading to false leak reports.
	 */
	kmemleak_ignore(pud);

	return pud;
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

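/*
 * The flush in the three __*_free_tlb() helpers here is needed
 * because radix hardware may cache partial translations in a page
 * walk cache; entries there can point at an intermediate table, so
 * they must be invalidated before that table is freed and reused.
 */
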
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

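/*
 * PTE pages below are handed out as fragments of a page, so several
 * tables can share one backing page. The last argument to
 * pte_fragment_alloc()/pte_fragment_free() distinguishes kernel
 * tables (1) from user tables (0); the user path additionally sets up
 * the struct page for split page table locking.
 */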
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries. So go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}

#define check_pgt_cache()	do { } while (0)

extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
static inline void update_page_count(int psize, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[psize]);
}

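/*
 * Illustrative use (the callers live elsewhere): code that creates or
 * tears down part of the kernel's direct (linear) mapping accounts it
 * per MMU page size, e.g. after mapping one 2M page:
 *
 *	update_page_count(MMU_PAGE_2M, 1);
 *
 * The counters are only maintained when CONFIG_PROC_FS is enabled,
 * since they exist to report mapping statistics through procfs.
 */
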
#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */