/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called
 * in the page fault path when we don't hit any of the major fault
 * cases, i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc.
 * The generic code will have handled those for us; here we
 * additionally deal with missing execute permission on some
 * processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
					pmd_pte(entry), address);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry; that should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}

/*
 * Serialize against find_current_mm_pte, which does a lockless
 * lookup in the page tables with local interrupts disabled. For huge
 * pages it casts pmd_t to pte_t. Since the format of pte_t differs
 * from that of pmd_t, we want to prevent a transition from a pmd
 * pointing to a page table to a pmd pointing to a huge page (and
 * back) while interrupts are disabled. We clear the pmd so that it
 * can be replaced with a page-table pointer in various code paths,
 * so make sure we wait for any parallel find_current_mm_pte to
 * finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}
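
/*
 * Typical usage (see pmdp_invalidate() below): the caller first
 * clears the pmd with pmd_hugepage_update() and flushes the TLB,
 * then calls serialize_against_pte_lookup(); only after that is it
 * safe to publish an entry in the other format.
 */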

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepage pmd to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}
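
/*
 * Note that the pre-invalidation pmd is returned: callers in generic
 * THP code can read, e.g., the dirty and accessed bits from the old
 * entry instead of re-reading the pmd after it has been cleared.
 */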

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
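
/*
 * In pmd_modify() above, _HPAGE_CHG_MASK selects the bits that must
 * survive a protection change: broadly the PFN plus tracking bits
 * such as _PAGE_DIRTY and _PAGE_ACCESSED (the exact set depends on
 * the platform headers). Everything else is supplied by newprot.
 */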

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a huge PMD entry in the Linux
 * page tables. We use it to preload an HPTE into the hash table
 * corresponding to the updated Linux huge PMD entry. Currently this
 * is a no-op on book3s64.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the partition table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register with the table
	 * base and the encoded (64K) table size.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
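
/*
 * For example, assuming PATB_SIZE_SHIFT is 16 (a 64K partition
 * table), the size field or'd into the PTCR above is 16 - 12 = 4;
 * the field encodes log2(table size) - 12.
 */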

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	/* Do we need a fixup here? */
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
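
/*
 * A note on the tlbie forms above, based on our reading of the ISA
 * encoding used by PPC_TLBIE_5(rb, rs, ric, prs, r): RIC=2 asks for
 * invalidation of all cached entries, PRS selects process- vs.
 * partition-scoped entries, and R distinguishes radix (1) from hash
 * (0). A previously-radix LPID therefore needs both a PRS=0 and a
 * PRS=1 flush, while a previously-hash LPID needs only PRS=0.
 */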

static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have used up all the fragments, mark the page NULL.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we raced and mm->context.pmd_frag is already set, return
	 * the allocated page with a single-fragment count.
	 */
	if (likely(!mm->context.pmd_frag)) {
		set_page_count(page, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	if (put_page_testzero(page)) {
		pgtable_pmd_page_dtor(page);
		free_unref_page(page);
	}
}
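
/*
 * Summary of the fragment scheme: __alloc_for_pmdcache() hands out
 * the first PMD_FRAG_SIZE fragment of a fresh page and, when
 * PMD_FRAG_NR > 1, sets the page refcount to PMD_FRAG_NR and caches
 * the next fragment in mm->context.pmd_frag. get_pmd_from_cache()
 * then carves fragments out of that page until it is exhausted, and
 * pmd_fragment_free() drops one reference per fragment, freeing the
 * page once the last fragment is released.
 */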

static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have used up all the fragments, mark the page NULL.
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we raced and mm->context.pte_frag is already set, return
	 * the allocated page with a single-fragment count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_unref_page(page);
	}
}
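
/*
 * The PTE path mirrors the PMD fragment scheme above, with one
 * difference: pgtable_page_ctor()/pgtable_page_dtor() are only run
 * for user page tables (kernel == 0), since kernel page tables do
 * not need the per-page lock and accounting state they set up.
 */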

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
	/* We don't free the pgd table via an RCU callback */
	default:
		BUG();
	}
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
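
/*
 * Note on the encoding above: page table fragments are aligned well
 * beyond MAX_PGTABLE_INDEX_SIZE, so the low bits of the table
 * pointer are free to carry the index through tlb_remove_table();
 * __tlb_remove_table() masks them back out to recover both the
 * pointer and the index.
 */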
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	return pgtable_free(table, index);
}
#endif