// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

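/*
 * Out-of-line access checks, used e.g. by the instrument_read()/
 * instrument_write() helpers: they validate an access of @size bytes at
 * @p against shadow memory. With the generic mode's 1:8 scaling, each
 * KASAN_GRANULE_SIZE bytes of memory map to one shadow byte, found via
 * kasan_mem_to_shadow():
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * A 16-byte access, for example, is validated by inspecting two
 * consecutive shadow bytes.
 */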
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
/*
 * CONFIG_GENERIC_ENTRY relies on compiler-emitted mem*() calls to not be
 * instrumented. KASAN-enabled toolchains should emit __asan_mem*() functions
 * for the sites they want to instrument.
 *
 * If we have a compiler that can instrument memintrinsics, never override
 * these, so that non-instrumented files can safely consider them as builtins.
 */
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
#endif

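/*
 * Instrumented memintrinsics: toolchains that support the memintrinsic
 * prefix (CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) emit calls to
 * __asan_mem*() instead of plain mem*() in instrumented files, so these
 * wrappers are always built. They check the accessed ranges and then
 * defer to the uninstrumented __mem*() implementations.
 */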
void *__asan_memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}
EXPORT_SYMBOL(__asan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__asan_memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
EXPORT_SYMBOL(__asan_memmove);
#endif

void *__asan_memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
EXPORT_SYMBOL(__asan_memcpy);

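/*
 * With CONFIG_KASAN_SW_TAGS, the compiler emits __hwasan_-prefixed calls
 * for memintrinsics; alias them to the __asan_ implementations above.
 */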
#ifdef CONFIG_KASAN_SW_TAGS
void *__hwasan_memset(void *addr, int c, size_t len) __alias(__asan_memset);
EXPORT_SYMBOL(__hwasan_memset);
#ifdef __HAVE_ARCH_MEMMOVE
void *__hwasan_memmove(void *dest, const void *src, size_t len) __alias(__asan_memmove);
EXPORT_SYMBOL(__hwasan_memmove);
#endif
void *__hwasan_memcpy(void *dest, const void *src, size_t len) __alias(__asan_memcpy);
EXPORT_SYMBOL(__hwasan_memcpy);
#endif

void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_arch_is_ready())
		return;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL(kasan_poison);

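/*
 * In the generic mode, a shadow byte of 0 means the whole granule is
 * accessible, and a value of 1..KASAN_GRANULE_SIZE-1 means only that many
 * leading bytes are accessible. kasan_poison_last_granule() encodes the
 * accessible remainder of an object whose size is not granule-aligned:
 * for example, a 41-byte object stores 41 & KASAN_GRANULE_MASK == 1 in
 * its sixth shadow byte.
 */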
#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_arch_is_ready())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif

void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	/*
	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
	 * that calls to ksize(), where size is not a multiple of machine-word
	 * size, would otherwise poison the invalid portion of the word.
	 */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules that cover the object. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the former is
	 * arch-specific, the latter depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(): if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

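/*
 * Memory hotplug: shadow for an onlined memory block is allocated from
 * vmalloc space. The shadow is smaller by the scaling factor: with the
 * generic mode's 1:8 scaling, for example, onlining a 128 MB block
 * allocates 16 MB of shadow.
 */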
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when onlining previously
		 * offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that it
		 * was the latter case.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(): the code to do that hasn't been
		 * written yet. So we'll just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

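/*
 * Weak stub: architectures that map shadow for early vm areas themselves
 * (arm64, for example) provide their own implementation.
 */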
void __init __weak kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{
}

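/*
 * Populate a single shadow PTE. The backing page is allocated before
 * taking init_mm.page_table_lock and only installed if the PTE is still
 * none, so two populators racing on the same address simply cause the
 * loser to free its extra page.
 */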
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!kasan_arch_is_ready())
		return 0;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);

	/*
	 * User Mode Linux maps enough shadow memory for all of virtual
	 * memory at boot, so it doesn't need to allocate more on vmalloc;
	 * the shadow just needs to be cleared.
	 *
	 * The remaining CONFIG_UML checks in this file exist for the same
	 * reason.
	 */
	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
		return 0;
	}

	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

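/*
 * Because the shadow range above is rounded out to whole pages, one shadow
 * page may serve more than one vmalloc area. Individual shadow pages are
 * therefore not freed on vfree(); that is deferred to
 * kasan_release_vmalloc(), which uses the helper below.
 */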
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(*ptep))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under free_vmap_area_lock, so it's not safe to interact with the
 * vmalloc/vmap infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                      end
 *                    v                          v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |        |                 |        |
 *      |        |        |        /--------/        |
 *      \--------\|/------/       |/-----------------/
 *              |||               ||
 *             |??AAAAAA|AAAAAAAA|AA??????|             < shadow
 *              (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                        free_region_end
 *  |                 start            end                |
 *  v                 v                v                  v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |        |                 |        |
 *      |        |        |        /--------/        |
 *      \--------\|/------/       |/-----------------/
 *              |||               ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|             < shadow
 *              (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
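/*
 * Concretely: with 4 KiB pages and the generic mode's 1:8 scaling,
 * KASAN_MEMORY_PER_SHADOW_PAGE is 32 KiB, so region_start and region_end
 * below are 'start' and 'end' rounded inwards to the 32 KiB chunks whose
 * shadow pages lie entirely within [start, end).
 */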
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	if (!kasan_arch_is_ready())
		return;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		if (IS_ENABLED(CONFIG_UML)) {
			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
			return;
		}
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	/*
	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
	 * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
	 * Software KASAN modes can't optimize zeroing memory by combining it
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */

	if (!kasan_arch_is_ready())
		return (void *)start;

	if (!is_vmalloc_or_module_addr(start))
		return (void *)start;

	/*
	 * Don't tag executable memory with the tag-based mode.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
		return (void *)start;

	start = set_tag(start, kasan_random_tag());
	kasan_unpoison(start, size, false);
	return (void *)start;
}
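
/*
 * Note that __kasan_unpoison_vmalloc() returns a pointer that may carry a
 * freshly assigned tag: with the software tag-based mode, callers must use
 * the returned pointer rather than the one they passed in.
 */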

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!kasan_arch_is_ready())
		return;

	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */

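/*
 * Without CONFIG_KASAN_VMALLOC, shadow is allocated on demand only for
 * module mappings. The shadow is the mapping size scaled down: with the
 * generic mode's 1:8 scaling, for example, a 4 MB module mapping needs
 * 512 KB of shadow, rounded up to whole pages below.
 */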
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
			KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
		return 0;
	}

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		struct vm_struct *vm = find_vm_area(addr);
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		vm->flags |= VM_KASAN;
		kmemleak_ignore(ret);

		if (vm->flags & VM_DEFER_KMEMLEAK)
			kmemleak_vmalloc(vm, size, gfp_mask);

		return 0;
	}

	return -ENOMEM;
}

void kasan_free_module_shadow(const struct vm_struct *vm)
{
	if (IS_ENABLED(CONFIG_UML))
		return;

	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif