/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

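/*
 * Hook filled in by platform MMU setup code (native or pseries) to
 * register the process table base and size with the hardware or
 * hypervisor once it has been allocated.
 */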
int (*register_process_table)(unsigned long base, unsigned long page_size,
                              unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those for us; we additionally deal with missing
 * execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
                                        pmd_pte(entry), address);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}

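/*
 * Test and clear the accessed (young) bit on a huge pmd; the real work
 * is done by __pmdp_test_and_clear_young().
 */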
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd entry. We should not be called for updating an
 * existing pmd entry; that should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
        WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

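/* Empty IPI handler; we only need its serializing side effect. */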
static void do_nothing(void *unused)
{
}

/*
 * Serialize against find_current_mm_pte, which does a lock-less lookup
 * in the page tables with local interrupts disabled. For huge pages it
 * casts pmd_t to pte_t. Since the format of pte_t differs from pmd_t,
 * we want to prevent a transition from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are
 * disabled. We clear the pmd so that it may be replaced with a page
 * table pointer in different code paths, so make sure we wait for any
 * parallel find_current_mm_pte to finish first.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
        smp_mb();
        smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        unsigned long old_pmd;

        old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        /*
         * This ensures that generic code that relies on IRQ disabling
         * to prevent a parallel THP split works as expected.
         */
        serialize_against_pte_lookup(vma->vm_mm);
        return __pmd(old_pmd);
}

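/* OR protection bits from a pgprot_t into a pmd. */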
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

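/* Construct a huge-page pmd for @pfn with protection @pgprot. */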
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
        return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

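/*
 * Change the protection of a pmd: keep only the bits in _HPAGE_CHG_MASK
 * (the pfn plus flags such as dirty and accessed that must survive a
 * protection change) and apply the new protection bits on top.
 */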
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
        if (radix_enabled())
                radix__mmu_cleanup_all();
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();
}

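/*
 * Memory hotplug: create or tear down the linear mapping for an added
 * or removed memory section, dispatching to the radix or hash
 * implementation depending on which MMU mode the kernel booted with.
 */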
#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
        if (radix_enabled())
                return radix__create_section_mapping(start, end, nid);

        return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__remove_section_mapping(start, end);

        return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
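/*
 * Allocate the partition table, which the MMU uses to find the
 * per-partition translation information, and point the hardware at it
 * via the partition table control register (PTCR).
 */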
void __init mmu_partition_table_init(void)
{
        unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
        unsigned long ptcr;

        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
        partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
                                                MEMBLOCK_ALLOC_ANYWHERE));

        /* Initialize the partition table with no entries */
        memset((void *)partition_tb, 0, patb_size);

        /*
         * Update the partition table control register with the table
         * base and the size, encoded as log2(size in bytes) - 12.
         */
        ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
        mtspr(SPRN_PTCR, ptcr);
        powernv_set_nmmu_ptcr(ptcr);
}

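/*
 * Install both doublewords of a partition table entry and flush any
 * translations the hardware may have cached for that LPID.
 */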
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
                                   unsigned long dw1)
{
        unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

        partition_tb[lpid].patb0 = cpu_to_be64(dw0);
        partition_tb[lpid].patb1 = cpu_to_be64(dw1);

        /*
         * Global flush of TLBs and partition table caches for this lpid.
         * The type of flush (hash or radix) depends on what the previous
         * use of this partition ID was, not the new use.
         */
        asm volatile("ptesync" : : : "memory");
        if (old & PATB_HR) {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
        } else {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
        }
        /* Do we need a fixup here? */
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
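/*
 * A PTE page table occupies only PTE_FRAG_SIZE bytes, so one full page
 * can be carved into PTE_FRAG_NR fragments. We hand out fragments from
 * a per-mm cache and only allocate a fresh page once it is exhausted.
 */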
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
        void *pte_frag, *ret;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pte_frag;
        if (ret) {
                pte_frag = ret + PTE_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments, mark the
                 * cache NULL.
                 */
                if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
                        pte_frag = NULL;
                mm->context.pte_frag = pte_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
        void *ret = NULL;
        struct page *page;

        if (!kernel) {
                page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
                if (!page)
                        return NULL;
                if (!pgtable_page_ctor(page)) {
                        __free_page(page);
                        return NULL;
                }
        } else {
                page = alloc_page(PGALLOC_GFP);
                if (!page)
                        return NULL;
        }

        ret = page_address(page);
        /*
         * If we support only one fragment, just return the
         * allocated page.
         */
        if (PTE_FRAG_NR == 1)
                return ret;
        spin_lock(&mm->page_table_lock);
        /*
         * If we find mm->context.pte_frag already set (another thread
         * populated the cache first), return the allocated page with a
         * single-fragment count. Otherwise split the page into
         * PTE_FRAG_NR fragments and cache the remainder.
         */
        if (likely(!mm->context.pte_frag)) {
                set_page_count(page, PTE_FRAG_NR);
                mm->context.pte_frag = ret + PTE_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pte_t *)ret;
}

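/* Allocate a PTE page, taking a fragment from the per-mm cache first. */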
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
        pte_t *pte;

        pte = get_pte_from_cache(mm);
        if (pte)
                return pte;

        return __alloc_for_ptecache(mm, kernel);
}

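/*
 * Drop a reference on a PTE fragment; the backing page is freed once
 * its last fragment is released.
 */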
void pte_fragment_free(unsigned long *table, int kernel)
{
        struct page *page = virt_to_page(table);

        if (put_page_testzero(page)) {
                if (!kernel)
                        pgtable_page_dtor(page);
                free_unref_page(page);
        }
}

static inline void pgtable_free(void *table, int index)
{
        switch (index) {
        case PTE_INDEX:
                pte_fragment_free(table, 0);
                break;
        case PMD_INDEX:
                kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), table);
                break;
        case PUD_INDEX:
                kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
                break;
        /* We don't free pgd tables via the RCU callback */
        default:
                BUG();
        }
}

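/*
 * On SMP we defer the free via tlb_remove_table() so that lock-less
 * page table walkers are done with the table before it is reused. The
 * table type is stashed in the low bits of the pointer (tables are
 * sufficiently aligned) and recovered in __tlb_remove_table().
 */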
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
        pgf |= index;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
        pgtable_free(table, index);
}
#endif /* CONFIG_SMP */