/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

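/* Run-time PMD fragment geometry (fragments per page and fragment size shift). */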
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

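/* Platform hook for registering a process table (base, page size, table size). */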
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a
 * minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will
 * have handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

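/*
 * Test and clear the accessed (young) bit of a huge pmd; the actual
 * update is done by __pmdp_test_and_clear_young().
 */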
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

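/* Empty IPI handler, used only to synchronize with other CPUs below. */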
static void do_nothing(void *unused)
{

}

/*
 * Serialize against find_current_mm_pte which does a lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t, we want to prevent a transition from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts
 * are disabled. We clear the pmd to possibly replace it with a page
 * table pointer in different code paths, so make sure we wait for any
 * parallel find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

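/* OR the protection bits from pgprot into the raw pmd value. */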
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

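/* Build a huge pmd mapping the given pfn with the given protection. */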
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

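/* As pfn_pmd(), but starting from a struct page. */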
pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

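/*
 * Change the protection of a huge pmd; only the bits covered by
 * _HPAGE_CHG_MASK are preserved from the old entry.
 */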
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

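/*
 * Memory hotplug: build or tear down the linear mapping for a section,
 * dispatching to the hash or radix implementation.
 */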
#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

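/*
 * Allocate and zero the partition table, then point the hardware at it
 * by programming the PTCR (and the nest MMU copy on PowerNV).
 */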
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register with the table base
	 * and the encoded 64K size.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

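/*
 * Install both doublewords of the partition table entry for an LPID and
 * flush any translations cached under the old entry.
 */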
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	/* do we need fixup here? */
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

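/*
 * Hand out the next free PMD fragment from the per-mm cache, if any,
 * and advance the cache to the fragment after it.
 */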
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PMD page NULL
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

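/*
 * Allocate a fresh page to carve PMD fragments from; any unused
 * fragments are parked in mm->context.pmd_frag for later allocations.
 */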
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	ret = page_address(page);
	/*
	 * If we support only one fragment just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pmd_frag already set, just return the
	 * allocated page with a single fragment count.
	 */
	if (likely(!mm->context.pmd_frag)) {
		set_page_count(page, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

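/* Allocate a PMD page table, reusing a cached fragment when one is available. */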
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

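/*
 * Drop a reference on the page backing this PMD fragment; the page is
 * freed once its last fragment has been released.
 */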
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	if (put_page_testzero(page)) {
		pgtable_pmd_page_dtor(page);
		free_unref_page(page);
	}
}

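/* PTE fragment counterpart of get_pmd_from_cache(). */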
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

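/*
 * Allocate a fresh page to carve PTE fragments from. Kernel page tables
 * skip the page constructor and GFP accounting.
 */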
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	/*
	 * If we support only one fragment just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pte_frag already set, just return the
	 * allocated page with a single fragment count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

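/* Allocate a PTE page table, reusing a cached fragment when one is available. */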
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

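/*
 * Drop a reference on the page backing this PTE fragment; the page is
 * freed once its last fragment has been released.
 */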
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_unref_page(page);
	}
}

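/* Free a page table page of the level selected by index (PTE/PMD/PUD). */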
static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
	/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

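/*
 * On SMP the page table level is encoded in the low bits of the table
 * pointer so that __tlb_remove_table() can recover it when the deferred
 * free runs; on UP the table is freed immediately.
 */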
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	return pgtable_free(table, index);
}
#endif