powerpc/mm/hugetlb/book3s: add _PAGE_PRESENT to hugepd pointer.
arch/powerpc/mm/pgtable-book3s64.c
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

int (*register_process_table)(unsigned long base, unsigned long page_size,
                              unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those for us; here we additionally deal with
 * missing execute permission on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                /*
                 * We can use MMU_PAGE_2M here because only the radix
                 * path looks at the psize.
                 */
                __ptep_set_access_flags(vma, pmdp_ptep(pmdp),
                                        pmd_pte(entry), address, MMU_PAGE_2M);
        }
        return changed;
}

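/*
 * Test and clear the accessed (young) bit in a huge PMD entry by
 * forwarding to the common __pmdp_test_and_clear_young() helper.
 */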
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge PMD entry. We should not be called to update an
 * existing PMD entry; that should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * Make sure the hardware valid bit is not set. We don't do
         * a TLB flush for this update.
         */
        WARN_ON(pte_val(pmd_pte(*pmdp)) & _PAGE_PRESENT);
        assert_spin_locked(pmd_lockptr(mm, pmdp));
        WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{
}
/*
 * Serialize against find_current_mm_pte, which does a lock-less lookup
 * in the page tables with local interrupts disabled. For huge pages it
 * casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t we want to prevent a transition from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts
 * are disabled. We clear the pmd so that it can be replaced with a page
 * table pointer in different code paths, so make sure we wait for any
 * parallel find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
        smp_mb();
        smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        unsigned long old_pmd;

        old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        /*
         * This ensures that generic code that relies on IRQ disabling
         * to prevent a parallel THP split works as expected.
         */
        serialize_against_pte_lookup(vma->vm_mm);
        return __pmd(old_pmd);
}

static pmd_t *pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

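/*
 * Build a huge-page PMD for a given pfn and protection: the pfn is
 * shifted into the RPN field and the protection bits are OR-ed in via
 * pmd_set_protbits(). mk_pmd() below is the struct page flavour of the
 * same operation.
 */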
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
        return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

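/*
 * Change the protection on a huge PMD: preserve only the bits covered
 * by _HPAGE_CHG_MASK and then apply the new protection bits on top.
 */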
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
        if (radix_enabled())
                prefetch((void *)addr);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
        if (radix_enabled())
                radix__mmu_cleanup_all();
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
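/*
 * Map/unmap a hot-plugged memory section, dispatching to the radix or
 * hash implementation depending on which MMU mode is active.
 */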
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
        if (radix_enabled())
                return radix__create_section_mapping(start, end, nid);

        return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__remove_section_mapping(start, end);

        return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

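/*
 * Allocate and zero the partition table at boot and point the hardware
 * at it by writing the PTCR (and the nest MMU copy on PowerNV).
 */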
void __init mmu_partition_table_init(void)
{
        unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
        unsigned long ptcr;

        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
        partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
                                                MEMBLOCK_ALLOC_ANYWHERE));

        /* Initialize the Partition Table with no entries */
        memset((void *)partition_tb, 0, patb_size);

        /*
         * Update the partition table control register with the table
         * base and the encoded table size (64K here).
         */
        ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
        mtspr(SPRN_PTCR, ptcr);
        powernv_set_nmmu_ptcr(ptcr);
}

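/*
 * Install both doublewords of a partition-table entry for @lpid and
 * flush stale translations for that partition. The flush type (hash or
 * radix) is chosen from the *old* entry's host-radix bit, as explained
 * below.
 */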
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
                                   unsigned long dw1)
{
        unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

        partition_tb[lpid].patb0 = cpu_to_be64(dw0);
        partition_tb[lpid].patb1 = cpu_to_be64(dw1);

        /*
         * Global flush of TLBs and partition table caches for this lpid.
         * The type of flush (hash or radix) depends on what the previous
         * use of this partition ID was, not the new use.
         */
        asm volatile("ptesync" : : : "memory");
        if (old & PATB_HR) {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
        } else {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
        }
        /* Do we need a fixup here? */
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

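/*
 * Grab a PMD fragment from the per-mm cache, if one is available.
 * Returns NULL when the cache is empty and a fresh page must be
 * allocated via __alloc_for_pmdcache().
 */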
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
        void *pmd_frag, *ret;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pmd_frag;
        if (ret) {
                pmd_frag = ret + PMD_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments of this page,
                 * mark the per-mm fragment pointer NULL.
                 */
                if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
                        pmd_frag = NULL;
                mm->context.pmd_frag = pmd_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pmd_t *)ret;
}

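/*
 * Allocate a fresh page for PMD fragments. The first fragment is handed
 * back to the caller; if the per-mm cache is still empty, the remainder
 * of the page is published there for later get_pmd_from_cache() calls.
 */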
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
        void *ret = NULL;
        struct page *page;
        gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;
        page = alloc_page(gfp);
        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {
                __free_pages(page, 0);
                return NULL;
        }

        atomic_set(&page->pt_frag_refcount, 1);

        ret = page_address(page);
        /*
         * If we support only one fragment, just return the
         * allocated page.
         */
        if (PMD_FRAG_NR == 1)
                return ret;

        spin_lock(&mm->page_table_lock);
        /*
         * If we find the per-mm fragment pointer already set, return
         * the allocated page with a single fragment count.
         */
        if (likely(!mm->context.pmd_frag)) {
                atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
                mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pmd_t *)ret;
}

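/*
 * Allocate a PMD page-table fragment: try the per-mm cache first and
 * fall back to allocating a new page. The vmaddr argument is currently
 * unused here.
 */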
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd;

        pmd = get_pmd_from_cache(mm);
        if (pmd)
                return pmd;

        return __alloc_for_pmdcache(mm);
}

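/*
 * Drop a reference on the page backing a PMD fragment; the page is
 * destructed and freed once the last fragment is released.
 */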
void pmd_fragment_free(unsigned long *pmd)
{
        struct page *page = virt_to_page(pmd);

        BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
        if (atomic_dec_and_test(&page->pt_frag_refcount)) {
                pgtable_pmd_page_dtor(page);
                __free_page(page);
        }
}

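/*
 * PTE fragments work the same way as the PMD fragments above: grab the
 * next fragment from the per-mm cache, or return NULL so the caller
 * can allocate a fresh page.
 */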
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
        void *pte_frag, *ret;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pte_frag;
        if (ret) {
                pte_frag = ret + PTE_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments of this page,
                 * mark the per-mm fragment pointer NULL.
                 */
                if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
                        pte_frag = NULL;
                mm->context.pte_frag = pte_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pte_t *)ret;
}

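/*
 * Allocate a fresh page for PTE fragments. User page tables get the
 * page-table constructor and memcg accounting; kernel tables skip both.
 */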
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
        void *ret = NULL;
        struct page *page;

        if (!kernel) {
                page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
                if (!page)
                        return NULL;
                if (!pgtable_page_ctor(page)) {
                        __free_page(page);
                        return NULL;
                }
        } else {
                page = alloc_page(PGALLOC_GFP);
                if (!page)
                        return NULL;
        }

        atomic_set(&page->pt_frag_refcount, 1);

        ret = page_address(page);
        /*
         * If we support only one fragment, just return the
         * allocated page.
         */
        if (PTE_FRAG_NR == 1)
                return ret;
        spin_lock(&mm->page_table_lock);
        /*
         * If we find the per-mm fragment pointer already set, return
         * the allocated page with a single fragment count.
         */
        if (likely(!mm->context.pte_frag)) {
                atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
                mm->context.pte_frag = ret + PTE_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pte_t *)ret;
}

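/*
 * Allocate a PTE page-table fragment: try the per-mm cache first, then
 * fall back to a new page. "kernel" selects kernel vs user page-table
 * treatment; vmaddr is currently unused here.
 */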
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
        pte_t *pte;

        pte = get_pte_from_cache(mm);
        if (pte)
                return pte;

        return __alloc_for_ptecache(mm, kernel);
}

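/*
 * Drop a reference on the page backing a PTE fragment; user page tables
 * additionally run the page-table destructor before the page is freed.
 */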
void pte_fragment_free(unsigned long *table, int kernel)
{
        struct page *page = virt_to_page(table);

        BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
        if (atomic_dec_and_test(&page->pt_frag_refcount)) {
                if (!kernel)
                        pgtable_page_dtor(page);
                __free_page(page);
        }
}

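/*
 * Free a page-table fragment or directory whose level is encoded in
 * "index". This is the common back end for both the immediate and the
 * deferred (RCU) free paths below.
 */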
static inline void pgtable_free(void *table, int index)
{
        switch (index) {
        case PTE_INDEX:
                pte_fragment_free(table, 0);
                break;
        case PMD_INDEX:
                pmd_fragment_free(table);
                break;
        case PUD_INDEX:
                kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
                break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
        /* 16M hugepd directory at pud level */
        case HTLB_16M_INDEX:
                BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
                kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
                break;
        /* 16G hugepd directory at the pgd level */
        case HTLB_16G_INDEX:
                BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
                kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
                break;
#endif
        /* We don't free pgd table via RCU callback */
        default:
                BUG();
        }
}

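/*
 * On SMP, page-table pages are freed via tlb_remove_table() so the free
 * is deferred until concurrent lock-less walkers are done. The level
 * index is stashed in the low bits of the table pointer and recovered
 * in __tlb_remove_table().
 */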
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
        pgf |= index;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        return pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
        return pgtable_free(table, index);
}
#endif

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
        /*
         * Hash maps the memory with one size, mmu_linear_psize,
         * so don't bother to print these on hash.
         */
        if (!radix_enabled())
                return;
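        /*
         * direct_pages_count[] holds page counts; the shifts below
         * convert counts of 4K/64K/2M/1G pages into kB.
         */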
        seq_printf(m, "DirectMap4k: %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
        seq_printf(m, "DirectMap64k: %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
        seq_printf(m, "DirectMap2M: %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
        seq_printf(m, "DirectMap1G: %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */