powerpc/radix: Fix kernel crash with mremap()
[linux-2.6-block.git] / arch/powerpc/mm/pgtable-book3s64.c
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

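/*
 * Hook filled in by the MMU setup code (hash or radix); called to
 * register the process table base with the hardware or hypervisor.
 */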
int (*register_process_table)(unsigned long base, unsigned long page_size,
                              unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It is also called
 * in the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                /*
                 * We can use MMU_PAGE_2M here, because only the radix
                 * path looks at the psize.
                 */
                __ptep_set_access_flags(vma, pmdp_ptep(pmdp),
                                        pmd_pte(entry), address, MMU_PAGE_2M);
        }
        return changed;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry; that should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * Make sure the hardware valid bit is not set. We don't do
         * a tlb flush for this update.
         */
        WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
        WARN_ON(!(pmd_large(pmd) || pmd_devmap(pmd)));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte, which does a lock-less lookup
 * in the page tables with local interrupts disabled. For huge pages it
 * casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t, we want to prevent a transition from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts
 * are disabled. We clear the pmd to possibly replace it with a page
 * table pointer in different code paths, so make sure we wait for any
 * parallel find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
        smp_mb();
        smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        unsigned long old_pmd;

        old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        /*
         * This ensures that generic code that relies on IRQ disabling
         * to prevent a parallel THP split works as expected.
         */
        serialize_against_pte_lookup(vma->vm_mm);
        return __pmd(old_pmd);
}
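
/*
 * Helpers for constructing huge-pmd entries from a pfn or struct page
 * plus protection bits.
 */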
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
        return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
        if (radix_enabled())
                prefetch((void *)addr);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
        if (radix_enabled())
                radix__mmu_cleanup_all();
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
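/*
 * Create/remove the linear-mapping entries for a hotplugged memory
 * section, dispatching to the radix or hash implementation.
 */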
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
        if (radix_enabled())
                return radix__create_section_mapping(start, end, nid);

        return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__remove_section_mapping(start, end);

        return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
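
/*
 * Allocate and zero the partition table, then point the hardware at it
 * via the PTCR (and mirror the PTCR value to the nest MMU).
 */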
void __init mmu_partition_table_init(void)
{
        unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
        unsigned long ptcr;

        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
        partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
                                                MEMBLOCK_ALLOC_ANYWHERE));

        /* Initialize the Partition Table with no entries */
        memset((void *)partition_tb, 0, patb_size);

        /*
         * Update the partition table control register with the
         * 64 KB table size (encoded as PATB_SIZE_SHIFT - 12).
         */
        ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
        mtspr(SPRN_PTCR, ptcr);
        powernv_set_nmmu_ptcr(ptcr);
}
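
/*
 * Install both doublewords of the partition-table entry for an lpid and
 * flush the translations that were cached under its previous use.
 */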
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
                                   unsigned long dw1)
{
        unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

        partition_tb[lpid].patb0 = cpu_to_be64(dw0);
        partition_tb[lpid].patb1 = cpu_to_be64(dw1);

        /*
         * Global flush of TLBs and partition table caches for this lpid.
         * The type of flush (hash or radix) depends on what the previous
         * use of this partition ID was, not the new use.
         */
        asm volatile("ptesync" : : : "memory");
        if (old & PATB_HR) {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
        } else {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
        }
        /* do we need a fixup here? */
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
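
/*
 * Take the next unused PMD fragment from the per-mm cache, or return
 * NULL when fragments are disabled (PMD_FRAG_NR == 1) or the cache is
 * empty.
 */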
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
        void *pmd_frag, *ret;

        if (PMD_FRAG_NR == 1)
                return NULL;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pmd_frag;
        if (ret) {
                pmd_frag = ret + PMD_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments, mark the PMD
                 * page NULL.
                 */
                if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
                        pmd_frag = NULL;
                mm->context.pmd_frag = pmd_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pmd_t *)ret;
}
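
/*
 * Slow path: allocate a fresh page of PMD fragments and, unless another
 * thread has installed one meanwhile, publish the remaining fragments
 * through mm->context.pmd_frag.
 */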
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
        void *ret = NULL;
        struct page *page;
        gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;
        page = alloc_page(gfp);
        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {
                __free_pages(page, 0);
                return NULL;
        }

        atomic_set(&page->pt_frag_refcount, 1);

        ret = page_address(page);
        /*
         * If we support only one fragment, just return the
         * allocated page.
         */
        if (PMD_FRAG_NR == 1)
                return ret;

        spin_lock(&mm->page_table_lock);
        /*
         * If we find pmd_frag already set, we return
         * the allocated page with a single fragment
         * count.
         */
        if (likely(!mm->context.pmd_frag)) {
                atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
                mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pmd_t *)ret;
}
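
/*
 * Allocate a PMD page-table page: try the per-mm fragment cache first
 * and fall back to allocating a fresh page.
 */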
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd;

        pmd = get_pmd_from_cache(mm);
        if (pmd)
                return pmd;

        return __alloc_for_pmdcache(mm);
}
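
/*
 * Drop one fragment reference on the backing page and free the page
 * once the last fragment is released.
 */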
void pmd_fragment_free(unsigned long *pmd)
{
        struct page *page = virt_to_page(pmd);

        BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
        if (atomic_dec_and_test(&page->pt_frag_refcount)) {
                pgtable_pmd_page_dtor(page);
                __free_page(page);
        }
}
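
/*
 * Free a page-table page according to the level encoded in @index.
 */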
static inline void pgtable_free(void *table, int index)
{
        switch (index) {
        case PTE_INDEX:
                pte_fragment_free(table, 0);
                break;
        case PMD_INDEX:
                pmd_fragment_free(table);
                break;
        case PUD_INDEX:
                kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
                break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
        /* 16M hugepd directory at pud level */
        case HTLB_16M_INDEX:
                BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
                kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
                break;
        /* 16G hugepd directory at the pgd level */
        case HTLB_16G_INDEX:
                BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
                kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
                break;
#endif
        /* We don't free pgd table via RCU callback */
        default:
                BUG();
        }
}
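
/*
 * On SMP, the level index is stashed in the low bits of the table
 * pointer so that __tlb_remove_table() knows how to free the table once
 * it is safe from concurrent lockless page-table walkers.
 */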
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
        pgf |= index;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        return pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
        return pgtable_free(table, index);
}
#endif

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
        /*
         * Hash maps all of memory with a single page size,
         * mmu_linear_psize, so don't bother to print these on hash.
         */
        if (!radix_enabled())
                return;
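        /* The shifts convert a count of pages of each size into kB. */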
        seq_printf(m, "DirectMap4k:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
        seq_printf(m, "DirectMap64k:   %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
        seq_printf(m, "DirectMap2M:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
        seq_printf(m, "DirectMap1G:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information, and it is stored at PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page, we need to withdraw
 * during a pmd move.
 *
 * With hash we always use the deposited table, irrespective of whether the
 * mapping is anonymous or not. With radix we use the deposited table only
 * for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
                           struct spinlock *old_pmd_ptl,
                           struct vm_area_struct *vma)
{
        if (radix_enabled())
                return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

        return true;
}