/*
 * arch/powerpc/mm/pgtable-book3s64.c
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

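/*
 * Set during MMU init to the routine that registers the process table
 * with the hardware or the hypervisor; NULL until then.
 */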
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte which does a lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t we want to prevent a transition from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts
 * are disabled. We clear the pmd to possibly replace it with a page
 * table pointer in different code paths. So make sure we wait for the
 * parallel find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

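/*
 * Helpers for building huge-pmd values from a pfn/page plus protection
 * bits: the pfn is shifted into the RPN field and the pgprot bits are
 * OR-ed in. pmd_modify() keeps only the bits in _HPAGE_CHG_MASK before
 * applying the new protection.
 */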
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	if (radix_enabled())
		prefetch((void *)addr);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

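/*
 * The partition table is the top-level translation structure on ISA 3.0
 * machines: each LPID owns a pair of doublewords that select hash or
 * radix and point at the corresponding page-table/process-table bases.
 */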
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register. The low bits of
	 * the PTCR carry the encoded table size (PATB_SIZE_SHIFT - 12),
	 * i.e. the 64K table allocated above.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	/* Do we need fixup here? */
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
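
/*
 * PMD fragment allocation: a full page is carved into PMD_FRAG_NR
 * fragments of PMD_FRAG_SIZE bytes. mm->context.pmd_frag caches the next
 * unused fragment of the most recently allocated page, and
 * page->pt_frag_refcount counts the fragments handed out so the backing
 * page is only freed once the last fragment is (see pmd_fragment_free()).
 */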
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PMD page NULL
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find mm->context.pmd_frag already set, another thread
	 * populated the cache first; return the allocated page with a
	 * single fragment reference.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

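/*
 * PTE fragments work the same way as the PMD fragments above: a page is
 * split into PTE_FRAG_NR pieces of PTE_FRAG_SIZE bytes and handed out one
 * at a time, with page->pt_frag_refcount tracking the live fragments.
 */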
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find mm->context.pte_frag already set, another thread
	 * populated the cache first; return the allocated page with a
	 * single fragment reference.
	 */
	if (likely(!mm->context.pte_frag)) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (!kernel)
			pgtable_page_dtor(page);
		__free_page(page);
	}
}

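/*
 * Page-table pages at every level are freed through this common path.
 * The caller passes the table level as an index (PTE_INDEX, PMD_INDEX,
 * PUD_INDEX, ...); pgtable_free_tlb() stashes that index in the low bits
 * of the (sufficiently aligned) table pointer so that the deferred
 * __tlb_remove_table() callback can recover it and pick the right
 * destructor.
 */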
static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
	/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
	/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
	/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	return pgtable_free(table, index);
}
#endif

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps all of memory with a single page size
	 * (mmu_linear_psize), so don't bother printing these on hash.
	 */
	if (!radix_enabled())
		return;
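
	/*
	 * Counts are in pages of each size; the shifts convert page
	 * counts to kB: 4K pages << 2, 64K << 6, 2M << 11, 1G << 20.
	 */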
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */