powerpc/mm: Don't send IPI to all cpus on THP updates
arch/powerpc/mm/pgtable-book3s64.c (linux-2.6-block.git)
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

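/*
 * Registers the process table base and size with the hardware or
 * hypervisor; the pointer is filled in during early MMU init.
 */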
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; here we additionally deal
 * with missing execute permission on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
					pmd_pte(entry), address);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}

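/*
 * Test and clear the accessed (young) bit on a huge pmd, returning
 * whether it was set.
 */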
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. This should not be called to update an existing
 * pmd entry; that should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
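
/*
 * Empty IPI callback: the cross-call exists only to wait for remote
 * cpus to leave their interrupts-disabled lockless page table walk.
 */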
static void do_nothing(void *unused)
{
}
/*
 * Serialize against find_current_mm_pte, which does a lockless lookup
 * in the page tables with local interrupts disabled. For huge pages it
 * casts pmd_t to pte_t. Since the format of pte_t differs from that of
 * pmd_t, we want to prevent a transition from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts
 * are disabled. We clear the pmd so that it can be replaced with a page
 * table pointer in different code paths, so make sure we wait for any
 * parallel find_current_mm_pte to finish. On the common path we only
 * interrupt the cpus in mm_cpumask(mm) rather than every cpu.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	/*
	 * Cxl fault handling requires us to do a lockless page table
	 * walk while inserting a hash page table entry, with the mm
	 * tracked in the cxl context. Hence we need to do a global
	 * flush.
	 */
	if (cxl_ctx_in_use())
		smp_call_function(do_nothing, NULL, 1);
	else
		smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
}

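/* Fold the protection bits of @pgprot into the raw pmd value. */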
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

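/*
 * Construct a huge-page pmd for @pfn: place the pfn in the RPN field
 * and apply the protection bits. mk_pmd() below is the struct page
 * variant.
 */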
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

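/*
 * Change the protection of an existing pmd: keep only the bits that
 * must survive a protection change (_HPAGE_CHG_MASK), then apply the
 * new protection.
 */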
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a huge PMD entry in the Linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated Linux huge PMD entry. It is currently a no-op here.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}
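
/*
 * Memory hot(un)plug helpers: dispatch to the radix or hash
 * implementation depending on which MMU mode is active.
 */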
#ifdef CONFIG_MEMORY_HOTPLUG
int create_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end);

	return hash__create_section_mapping(start, end);
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */