Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
[linux-block.git] / mm / page_table_check.c
CommitLineData
df4e817b
PT
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright (c) 2021, Google LLC.
5 * Pasha Tatashin <pasha.tatashin@soleen.com>
6 */
7#include <linux/mm.h>
8#include <linux/page_table_check.h>
9
10#undef pr_fmt
11#define pr_fmt(fmt) "page_table_check: " fmt
12
/*
 * Per-page bookkeeping stored in the page_ext area: how many times this
 * page is currently mapped into user page tables as anonymous memory vs.
 * as file-backed memory.  The two are mutually exclusive per page.
 */
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};
17
/*
 * Boot-time enable flag; defaults to the Kconfig "enforced" setting and is
 * overridable with the "page_table_check=" early parameter.  __initdata:
 * only consulted during boot, before the static key below is set up.
 */
static bool __page_table_check_enabled __initdata =
	IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);
20
/*
 * Defaults to true (checking disabled); init_page_table_check() flips it
 * off when the feature is enabled, so the hot paths cost one patched
 * branch when the checker is not in use.
 */
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);
23
24static int __init early_page_table_check_param(char *buf)
25{
597da28e 26 return strtobool(buf, &__page_table_check_enabled);
df4e817b
PT
27}
28
29early_param("page_table_check", early_page_table_check_param);
30
/*
 * page_ext hook: tell the page_ext framework whether to reserve
 * sizeof(struct page_table_check) bytes per page for us.
 */
static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}
35
36static void __init init_page_table_check(void)
37{
38 if (!__page_table_check_enabled)
39 return;
40 static_branch_disable(&page_table_check_disabled);
41}
42
/* Registration with the page_ext framework: per-page data size and hooks. */
struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};
48
49static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
50{
51 BUG_ON(!page_ext);
52 return (void *)(page_ext) + page_table_check_ops.offset;
53}
54
55static inline bool pte_user_accessible_page(pte_t pte)
56{
57 return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
58}
59
60static inline bool pmd_user_accessible_page(pmd_t pmd)
61{
62 return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) &&
63 (pmd_val(pmd) & _PAGE_USER);
64}
65
66static inline bool pud_user_accessible_page(pud_t pud)
67{
68 return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) &&
69 (pud_val(pud) & _PAGE_USER);
70}
71
72/*
73 * An enty is removed from the page table, decrement the counters for that page
74 * verify that it is of correct type and counters do not become negative.
75 */
76static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
77 unsigned long pfn, unsigned long pgcnt)
78{
79 struct page_ext *page_ext;
80 struct page *page;
64d8b9e1 81 unsigned long i;
df4e817b 82 bool anon;
df4e817b
PT
83
84 if (!pfn_valid(pfn))
85 return;
86
87 page = pfn_to_page(pfn);
88 page_ext = lookup_page_ext(page);
89 anon = PageAnon(page);
90
91 for (i = 0; i < pgcnt; i++) {
92 struct page_table_check *ptc = get_page_table_check(page_ext);
93
94 if (anon) {
95 BUG_ON(atomic_read(&ptc->file_map_count));
96 BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
97 } else {
98 BUG_ON(atomic_read(&ptc->anon_map_count));
99 BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
100 }
101 page_ext = page_ext_next(page_ext);
102 }
103}
104
105/*
106 * A new enty is added to the page table, increment the counters for that page
107 * verify that it is of correct type and is not being mapped with a different
108 * type to a different process.
109 */
110static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
111 unsigned long pfn, unsigned long pgcnt,
112 bool rw)
113{
114 struct page_ext *page_ext;
115 struct page *page;
64d8b9e1 116 unsigned long i;
df4e817b 117 bool anon;
df4e817b
PT
118
119 if (!pfn_valid(pfn))
120 return;
121
122 page = pfn_to_page(pfn);
123 page_ext = lookup_page_ext(page);
124 anon = PageAnon(page);
125
126 for (i = 0; i < pgcnt; i++) {
127 struct page_table_check *ptc = get_page_table_check(page_ext);
128
129 if (anon) {
130 BUG_ON(atomic_read(&ptc->file_map_count));
131 BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
132 } else {
133 BUG_ON(atomic_read(&ptc->anon_map_count));
134 BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
135 }
136 page_ext = page_ext_next(page_ext);
137 }
138}
139
140/*
141 * page is on free list, or is being allocated, verify that counters are zeroes
142 * crash if they are not.
143 */
144void __page_table_check_zero(struct page *page, unsigned int order)
145{
146 struct page_ext *page_ext = lookup_page_ext(page);
64d8b9e1 147 unsigned long i;
df4e817b
PT
148
149 BUG_ON(!page_ext);
64d8b9e1 150 for (i = 0; i < (1ul << order); i++) {
df4e817b
PT
151 struct page_table_check *ptc = get_page_table_check(page_ext);
152
153 BUG_ON(atomic_read(&ptc->anon_map_count));
154 BUG_ON(atomic_read(&ptc->file_map_count));
155 page_ext = page_ext_next(page_ext);
156 }
157}
158
159void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
160 pte_t pte)
161{
162 if (&init_mm == mm)
163 return;
164
165 if (pte_user_accessible_page(pte)) {
166 page_table_check_clear(mm, addr, pte_pfn(pte),
167 PAGE_SIZE >> PAGE_SHIFT);
168 }
169}
170EXPORT_SYMBOL(__page_table_check_pte_clear);
171
172void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
173 pmd_t pmd)
174{
175 if (&init_mm == mm)
176 return;
177
178 if (pmd_user_accessible_page(pmd)) {
179 page_table_check_clear(mm, addr, pmd_pfn(pmd),
180 PMD_PAGE_SIZE >> PAGE_SHIFT);
181 }
182}
183EXPORT_SYMBOL(__page_table_check_pmd_clear);
184
185void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
186 pud_t pud)
187{
188 if (&init_mm == mm)
189 return;
190
191 if (pud_user_accessible_page(pud)) {
192 page_table_check_clear(mm, addr, pud_pfn(pud),
193 PUD_PAGE_SIZE >> PAGE_SHIFT);
194 }
195}
196EXPORT_SYMBOL(__page_table_check_pud_clear);
197
198void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
199 pte_t *ptep, pte_t pte)
200{
df4e817b
PT
201 if (&init_mm == mm)
202 return;
203
64d8b9e1 204 __page_table_check_pte_clear(mm, addr, *ptep);
df4e817b
PT
205 if (pte_user_accessible_page(pte)) {
206 page_table_check_set(mm, addr, pte_pfn(pte),
207 PAGE_SIZE >> PAGE_SHIFT,
208 pte_write(pte));
209 }
210}
211EXPORT_SYMBOL(__page_table_check_pte_set);
212
213void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
214 pmd_t *pmdp, pmd_t pmd)
215{
df4e817b
PT
216 if (&init_mm == mm)
217 return;
218
64d8b9e1 219 __page_table_check_pmd_clear(mm, addr, *pmdp);
df4e817b
PT
220 if (pmd_user_accessible_page(pmd)) {
221 page_table_check_set(mm, addr, pmd_pfn(pmd),
222 PMD_PAGE_SIZE >> PAGE_SHIFT,
223 pmd_write(pmd));
224 }
225}
226EXPORT_SYMBOL(__page_table_check_pmd_set);
227
228void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
229 pud_t *pudp, pud_t pud)
230{
df4e817b
PT
231 if (&init_mm == mm)
232 return;
233
64d8b9e1 234 __page_table_check_pud_clear(mm, addr, *pudp);
df4e817b
PT
235 if (pud_user_accessible_page(pud)) {
236 page_table_check_set(mm, addr, pud_pfn(pud),
237 PUD_PAGE_SIZE >> PAGE_SHIFT,
238 pud_write(pud));
239 }
240}
241EXPORT_SYMBOL(__page_table_check_pud_set);
80110bbf
PT
242
243void __page_table_check_pte_clear_range(struct mm_struct *mm,
244 unsigned long addr,
245 pmd_t pmd)
246{
247 if (&init_mm == mm)
248 return;
249
250 if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
251 pte_t *ptep = pte_offset_map(&pmd, addr);
252 unsigned long i;
253
254 pte_unmap(ptep);
255 for (i = 0; i < PTRS_PER_PTE; i++) {
256 __page_table_check_pte_clear(mm, addr, *ptep);
257 addr += PAGE_SIZE;
258 ptep++;
259 }
260 }
261}