mm,thp: reduce ifdef'ery for THP in generic code
/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

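/*
 * A minimal sketch (hypothetical helper, not part of this file's API)
 * of the walk pattern that ends up calling the functions above: each
 * p?d_none_or_clear_bad() reports and clears a corrupted entry before
 * treating it as empty.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))		/* may call pgd_clear_bad() */
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))		/* may call pud_clear_bad() */
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))		/* may call pmd_clear_bad() */
		return NULL;
	return pte_offset_map(pmd, addr);
}
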
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed) and write permission.
 * Furthermore, we know it always gets set to a "more permissive"
 * setting, which allows most architectures to optimize this. We
 * return whether the PTE actually changed, which in turn instructs
 * the caller to do things like update_mmu_cache. This used to be
 * done in the caller, but sparc needs minor faults to force that
 * call on sun4c so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

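/*
 * A minimal sketch of the calling convention described above
 * (hypothetical caller, modeled on the fault path): only if the PTE
 * actually changed does the caller need to update the MMU cache.
 */
static void fixup_pte_flags(struct vm_area_struct *vma, unsigned long address,
			    pte_t *ptep, pte_t entry, int dirty)
{
	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
		update_mmu_cache(vma, address, ptep);
}
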
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	/* no TLB entry can exist if the old pte was never accessible */
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

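/*
 * A minimal sketch of the clear-then-reuse pattern served by
 * ptep_clear_flush() (hypothetical helper): once it returns, no CPU
 * holds a stale TLB entry, so the old page can be safely replaced.
 */
static pte_t replace_pte(struct vm_area_struct *vma, unsigned long address,
			 pte_t *ptep, pte_t newpte)
{
	pte_t old = ptep_clear_flush(vma, address, ptep);

	set_pte_at(vma->vm_mm, address, ptep, newpte);
	return old;	/* caller can propagate dirty/accessed bits */
}
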
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

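/*
 * Why the flush in pmdp_splitting_flush() is sufficient (illustrative,
 * simplified): fast GUP walks page tables with interrupts disabled,
 * roughly
 *
 *	local_irq_save(flags);
 *	... walk pgd/pud/pmd/pte with no mmap_sem or page table locks ...
 *	local_irq_restore(flags);
 *
 * On architectures that implement flush_tlb_range() with IPIs, the
 * flush cannot complete while any CPU is inside such a section, so by
 * the time pmdp_splitting_flush() returns, every concurrent gup-fast
 * walk has either finished or will observe the splitting bit and back
 * off.
 */
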
#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	if (list_empty(&pgtable->lru))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next,
						    struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif
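
/*
 * A minimal sketch of how the two helpers above pair up (hypothetical
 * function; the real callers are the THP collapse and split paths):
 * the pte page deposited when a huge pmd is created is withdrawn at
 * split time, so a split never has to allocate memory.
 */
static void deposit_withdraw_demo(struct mm_struct *mm, pmd_t *pmdp,
				  pgtable_t pgtable)
{
	spinlock_t *ptl = pmd_lock(mm, pmdp);	/* both helpers need it held */

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);	 /* at collapse */
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp); /* at split */
	spin_unlock(ptl);
}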

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and the hugepage pte formats are the same, so we
	 * can use the same function for both.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
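
/*
 * A minimal sketch of the collapse-side caller (modeled on
 * collapse_huge_page() in mm/huge_memory.c; "collapse_clear" itself is
 * hypothetical): the pmd cleared here points to a pte page, never to a
 * huge page, and that pte page is handed back for deposit.
 */
static pgtable_t collapse_clear(struct vm_area_struct *vma,
				unsigned long haddr, pmd_t *pmdp)
{
	/* detach the pte table and flush; gup-fast can no longer see it */
	pmd_t old = pmdp_collapse_flush(vma, haddr, pmdp);

	/* return the detached pte page for pgtable_trans_huge_deposit() */
	return pmd_pgtable(old);
}
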
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */