// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN hooks for kernel subsystems.
 *
 * These functions handle creation of KMSAN metadata for memory allocations.
 *
 * Copyright (C) 2018-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <linux/cacheflush.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>

#include "../internal.h"
#include "../slab.h"
#include "kmsan.h"

/*
 * Instrumented functions shouldn't be called under
 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
 * skipping effects of functions like memset() inside instrumented code.
 */
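
/*
 * Illustrative sketch, not part of the original file: a hypothetical hook
 * following the rule above. The instrumented call (memset() here) stays
 * outside the enter/leave pair; otherwise KMSAN would skip its effects on
 * the shadow state. Kept under #if 0 so it is never compiled.
 */
#if 0
static void example_zeroing_hook(void *ptr, size_t size)
{
	memset(ptr, 0, size);	/* instrumented: called outside the runtime */
	kmsan_enter_runtime();
	kmsan_internal_unpoison_memory(ptr, size, /*checked*/ false);
	kmsan_leave_runtime();
}
#endif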

void kmsan_task_create(struct task_struct *task)
{
	kmsan_enter_runtime();
	kmsan_internal_task_create(task);
	kmsan_leave_runtime();
}

void kmsan_task_exit(struct task_struct *task)
{
	struct kmsan_ctx *ctx = &task->kmsan_ctx;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ctx->allow_reporting = false;
}

void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
	if (unlikely(object == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * There's a ctor or this is an RCU cache - do nothing. The memory
	 * status hasn't changed since last use.
	 */
	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory(object, s->object_size,
					       KMSAN_POISON_CHECK);
	else
		kmsan_internal_poison_memory(object, s->object_size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

void kmsan_slab_free(struct kmem_cache *s, void *object)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
		return;
	/*
	 * If there's a constructor, freed memory must remain in the same state
	 * until the next allocation. We cannot save its state to detect
	 * use-after-free bugs; instead we just keep it unpoisoned.
	 */
	if (s->ctor)
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	if (unlikely(ptr == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory((void *)ptr, size,
					       /*checked*/ true);
	else
		kmsan_internal_poison_memory((void *)ptr, size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

void kmsan_kfree_large(const void *ptr)
{
	struct page *page;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	page = virt_to_head_page((void *)ptr);
	KMSAN_WARN_ON(ptr != page_address(page));
	kmsan_internal_poison_memory((void *)ptr,
				     PAGE_SIZE << compound_order(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

static unsigned long vmalloc_shadow(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_SHADOW);
}

static unsigned long vmalloc_origin(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_ORIGIN);
}

void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
	__vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
	__vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
}

/*
 * This function creates new shadow/origin pages for the physical pages mapped
 * into the virtual memory. If those physical pages already had shadow/origin,
 * those are ignored.
 */
void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
			      phys_addr_t phys_addr, pgprot_t prot,
			      unsigned int page_shift)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
	struct page *shadow, *origin;
	unsigned long off = 0;
	int nr;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
		shadow = alloc_pages(gfp_mask, 1);
		origin = alloc_pages(gfp_mask, 1);
		__vmap_pages_range_noflush(
			vmalloc_shadow(start + off),
			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
			PAGE_SHIFT);
		__vmap_pages_range_noflush(
			vmalloc_origin(start + off),
			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
			PAGE_SHIFT);
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
}
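
/*
 * Illustrative sketch, not part of the original file: roughly how a vmalloc
 * path might invoke the hook above after mapping an I/O range, assuming a
 * mapping helper like vmap_range_noflush() (the real caller lives in
 * mm/vmalloc.c; names here are simplified).
 */
#if 0
static int example_ioremap(unsigned long addr, unsigned long end,
			   phys_addr_t phys_addr, pgprot_t prot,
			   unsigned int page_shift)
{
	int err = vmap_range_noflush(addr, end, phys_addr, prot, page_shift);

	if (!err)
		kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					 page_shift);
	return err;
}
#endif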

void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
{
	unsigned long v_shadow, v_origin;
	struct page *shadow, *origin;
	int nr;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	v_shadow = (unsigned long)vmalloc_shadow(start);
	v_origin = (unsigned long)vmalloc_origin(start);
	for (int i = 0; i < nr;
	     i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
		shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
		origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
		__vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
		__vunmap_range_noflush(v_origin, vmalloc_origin(end));
		if (shadow)
			__free_pages(shadow, 1);
		if (origin)
			__free_pages(origin, 1);
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
}

void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
			size_t left)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * At this point we've copied the memory already. It's hard to check it
	 * before copying, as the size of the actually copied buffer is unknown.
	 */

	/* copy_to_user() may copy zero bytes. No need to check. */
	if (!to_copy)
		return;
	/* Or maybe copy_to_user() failed to copy anything. */
	if (to_copy <= left)
		return;

	ua_flags = user_access_save();
	if ((u64)to < TASK_SIZE) {
		/* This is a user memory access, check it. */
		kmsan_internal_check_memory((void *)from, to_copy - left, to,
					    REASON_COPY_TO_USER);
	} else {
		/*
		 * Otherwise this is a kernel memory access. This happens when a
		 * compat syscall passes an argument allocated on the kernel
		 * stack to a real syscall.
		 * Don't check anything; just copy the shadow of the copied
		 * bytes.
		 */
		kmsan_internal_memmove_metadata((void *)to, (void *)from,
						to_copy - left);
	}
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_copy_to_user);
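
/*
 * Illustrative sketch, not part of the original file: how instrumented
 * usercopy code might report a finished copy to KMSAN. `left' is the number
 * of bytes raw_copy_to_user() failed to copy (its return value); the wrapper
 * name is hypothetical.
 */
#if 0
static unsigned long example_instrumented_copy_to_user(void __user *to,
						       const void *from,
						       unsigned long n)
{
	unsigned long left = raw_copy_to_user(to, from, n);

	kmsan_copy_to_user(to, from, n, left);
	return left;
}
#endif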

/* Helper function to check a URB. */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
	if (!urb)
		return;
	if (is_out)
		kmsan_internal_check_memory(urb->transfer_buffer,
					    urb->transfer_buffer_length,
					    /*user_addr*/ 0, REASON_SUBMIT_URB);
	else
		kmsan_internal_unpoison_memory(urb->transfer_buffer,
					       urb->transfer_buffer_length,
					       /*checked*/ false);
}
EXPORT_SYMBOL_GPL(kmsan_handle_urb);
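
/*
 * Illustrative sketch, not part of the original file: the USB core is the
 * intended caller, checking outgoing transfer buffers at submission time and
 * unpoisoning incoming ones (heavily simplified; the real call site is in
 * drivers/usb/core/urb.c).
 */
#if 0
static int example_usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	kmsan_handle_urb(urb, usb_urb_dir_out(urb));
	/* ... hand the URB over to the host controller driver ... */
	return 0;
}
#endif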

static void kmsan_handle_dma_page(const void *addr, size_t size,
				  enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
					    REASON_ANY);
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_TO_DEVICE:
		kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
					    REASON_ANY);
		break;
	case DMA_FROM_DEVICE:
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
		break;
	case DMA_NONE:
		break;
	}
}

/* Helper function to handle DMA data transfers. */
void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
		      enum dma_data_direction dir)
{
	u64 page_offset, to_go, addr;

	if (PageHighMem(page))
		return;
	addr = (u64)page_address(page) + offset;
	/*
	 * The kernel may occasionally give us adjacent DMA pages not belonging
	 * to the same allocation. Process them separately to avoid triggering
	 * internal KMSAN checks.
	 */
	while (size > 0) {
		page_offset = addr % PAGE_SIZE;
		to_go = min(PAGE_SIZE - page_offset, (u64)size);
		kmsan_handle_dma_page((void *)addr, to_go, dir);
		addr += to_go;
		size -= to_go;
	}
}

void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
			 enum dma_data_direction dir)
{
	struct scatterlist *item;
	int i;

	for_each_sg(sg, item, nents, i)
		kmsan_handle_dma(sg_page(item), item->offset, item->length,
				 dir);
}
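
/*
 * Illustrative sketch, not part of the original file: the DMA mapping layer
 * is the intended caller of the helpers above (simplified; the real calls
 * live in kernel/dma/mapping.c, and the function name here is hypothetical).
 */
#if 0
static dma_addr_t example_dma_map_page(struct device *dev, struct page *page,
				       size_t offset, size_t size,
				       enum dma_data_direction dir)
{
	kmsan_handle_dma(page, offset, size, dir);
	/* ... perform the actual mapping and return the bus address ... */
	return 0;
}
#endif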

/* Functions from kmsan-checks.h follow. */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	/* Callers may want to poison/unpoison arbitrary memory. */
	kmsan_internal_poison_memory((void *)address, size, flags,
				     KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_poison_memory);

void kmsan_unpoison_memory(const void *address, size_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	ua_flags = user_access_save();
	kmsan_enter_runtime();
	/* Callers may want to poison/unpoison arbitrary memory. */
	kmsan_internal_unpoison_memory((void *)address, size,
				       KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);
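
/*
 * Illustrative sketch, not part of the original file: a typical user of the
 * kmsan-checks.h API. A driver whose buffer is filled by hardware (invisible
 * to KMSAN) unpoisons it before the kernel reads it; hypothetical_hw_read()
 * is a made-up stand-in for the device access.
 */
#if 0
static void example_read_from_device(void *buf, size_t len)
{
	hypothetical_hw_read(buf, len);	/* writes happen outside KMSAN's view */
	kmsan_unpoison_memory(buf, len);	/* mark buf as initialized */
}
#endif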

/*
 * Version of kmsan_unpoison_memory() that can be called from within the KMSAN
 * runtime.
 *
 * Non-instrumented IRQ entry functions receive struct pt_regs from assembly
 * code. Those regs need to be unpoisoned; otherwise using them will result in
 * false positives.
 * Using kmsan_unpoison_memory() is not an option in entry code, because the
 * return value of in_task() is inconsistent - as a result, certain calls to
 * kmsan_unpoison_memory() are ignored. kmsan_unpoison_entry_regs() ensures that
 * the registers are unpoisoned even if kmsan_in_runtime() is true in the early
 * entry code.
 */
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	kmsan_internal_unpoison_memory((void *)regs, sizeof(*regs),
				       KMSAN_POISON_NOCHECK);
	user_access_restore(ua_flags);
}
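
/*
 * Illustrative sketch, not part of the original file: an IRQ entry path
 * unpoisoning the register state it received from assembly (simplified from
 * the generic entry code in kernel/entry/common.c).
 */
#if 0
static void example_irqentry_enter(struct pt_regs *regs)
{
	kmsan_unpoison_entry_regs(regs);
	/* ... regs can now be used without false-positive reports ... */
}
#endif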

void kmsan_check_memory(const void *addr, size_t size)
{
	if (!kmsan_enabled)
		return;
	return kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
					   REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);
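
/*
 * Illustrative sketch, not part of the original file: checking a buffer for
 * initializedness before handing it to hardware, the counterpart of the
 * unpoisoning example above.
 */
#if 0
static void example_write_to_device(const void *buf, size_t len)
{
	kmsan_check_memory(buf, len);	/* report if any byte is uninitialized */
	/* ... hand buf over to the device ... */
}
#endif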