/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include <trace/events/thp.h>
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, ie, a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}
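/*
 * Illustrative caller sketch (hypothetical, not lifted from a particular
 * call site): the generic THP fault path typically builds the relaxed entry
 * and only refreshes the MMU cache when something actually changed, e.g.
 *
 *	entry = pmd_mkyoung(orig_pmd);
 *	if (write)
 *		entry = pmd_mkdirty(entry);
 *	if (pmdp_set_access_flags(vma, haddr, pmdp, entry, write))
 *		update_mmu_cache_pmd(vma, haddr, pmdp);
 *
 * A non-zero return value means the entry was rewritten (and flushed where
 * required), so the caller knows the fault has been resolved in place.
 */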
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */
	WARN_ON(pte_val(pmd_pte(*pmdp)) & _PAGE_PRESENT);
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
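/*
 * Changing bits in an already-valid huge pmd (write protection, clearing
 * access/dirty, invalidation) is expected to go through pmd_hugepage_update()
 * instead, which read-modify-writes the live entry and returns the old value
 * so the caller can do the appropriate TLB maintenance; set_pmd_at() only
 * installs a brand-new entry, which is why no flush is done above.
 */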
static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte which does a lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t we want to prevent transit from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are disabled.
 * We clear the pmd to possibly replace it with a page table pointer in
 * different code paths. So make sure we wait for the parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}
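/*
 * Usage sketch: a caller that tears down a huge pmd first clears or
 * invalidates the entry and flushes the TLB, then calls
 * serialize_against_pte_lookup(mm).  The IPI cannot be taken by a CPU that
 * still has interrupts disabled inside a lock-less walk, so by the time
 * smp_call_function_many() returns, no walker can still be interpreting the
 * old huge-page format.
 */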
/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}
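/*
 * Reading the pmd_hugepage_update() call above: the third argument is the
 * set of bits to clear (_PAGE_PRESENT) and the fourth the set of bits to
 * install (_PAGE_INVALID), so the entry atomically goes from valid to
 * invalid-but-not-none while the other bits are preserved, and the previous
 * raw value is returned so we can hand back the old pmd.
 */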
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}
pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	if (radix_enabled())
		prefetch((void *)addr);
}
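/*
 * On radix there is no hash table to preload, so the only useful work here is
 * prefetching the faulting address; with hash translation this is currently a
 * no-op.
 */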
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}
#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register,
	 * 64 K size.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
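/*
 * Worked example of the PTCR encoding above (values illustrative): the low
 * bits of PTCR hold log2(table size in bytes) - 12, so a 64K (2^16 byte)
 * partition table is encoded as 16 - 12 = 4, with the physical base address
 * in the upper bits.  The table is allocated with
 * memblock_alloc_base(patb_size, patb_size, ...) precisely so that the base
 * is aligned to its own size, as the register layout requires.
 */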
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	/* do we need fixup here ?*/
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
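/*
 * Why the flush keys off the *old* patb0 value: if this LPID previously ran
 * hash and is now being pointed at a radix tree (or vice versa), any stale
 * TLB and partition-table-cache entries are still in the old format, so the
 * tlbie sequence has to be issued in that old format for the stale entries
 * to be found; hence the branch on the saved PATB_HR bit rather than on the
 * new dw0.
 */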
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark the pmd page NULL
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}
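/*
 * Background on the fragment cache (sizes below purely illustrative): a page
 * is carved into PMD_FRAG_NR fragments of PMD_FRAG_SIZE bytes, and
 * mm->context.pmd_frag points at the next unused fragment of the most
 * recently allocated page.  If PMD_FRAG_SIZE were 4K on a 64K-page
 * configuration, PMD_FRAG_NR would be 16 and the cursor would land back on a
 * page boundary after the last fragment is handed out, which is exactly the
 * "(pmd_frag & ~PAGE_MASK) == 0" condition that retires the page above.
 */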
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pmd_frag already set, we return
	 * the allocated page with a single fragment
	 * count.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pte_frag already set, we return
	 * the allocated page with a single fragment
	 * count.
	 */
	if (likely(!mm->context.pte_frag)) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (!kernel)
			pgtable_page_dtor(page);
		__free_page(page);
	}
}
static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
		/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
		/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
		/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}
void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
#else /* !CONFIG_SMP */
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	return pgtable_free(table, index);
}
#endif /* CONFIG_SMP */
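/*
 * The pointer tagging used by pgtable_free_tlb()/__tlb_remove_table() relies
 * on page-table fragments being aligned to more than MAX_PGTABLE_INDEX_SIZE,
 * which leaves the low bits of the pointer free.  Purely illustrative
 * arithmetic (the real mask may differ): with a mask of 0x7, freeing a pmd
 * fragment at 0xc000000012345000 queues 0xc000000012345000 | PMD_INDEX, and
 * __tlb_remove_table() later splits that back into the pointer (& ~0x7) and
 * the index (& 0x7) before dispatching to pgtable_free().
 */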
#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size, mmu_linear_psize,
	 * so don't bother to print these on hash.
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
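/*
 * The shifts above convert a count of mappings into kB: a 4K page is 4 kB
 * (count << 2), a 64K page is 64 kB (<< 6), a 2M page is 2048 kB (<< 11) and
 * a 1G page is 1048576 kB (<< 20).
 */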
#endif /* CONFIG_PROC_FS */