// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

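/*
 * Return the slab that contains @addr, or NULL if @addr is not a valid
 * directly mapped kernel address.
 */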
struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

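/*
 * Save the current call stack into the stack depot and return its handle.
 * @can_alloc tells the depot whether it may allocate new storage for the
 * trace; pass false from contexts where allocation is not safe.
 */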
depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}

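/* Record the current task's pid and stack trace in an alloc/free track. */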
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags, true);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

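/* Mark the range [address, address + size) as accessible. */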
void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

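/*
 * Unpoison the memory of a just-allocated page range and, for the tag-based
 * modes, assign it a fresh random tag. Returns false if the pages were left
 * untouched (highmem pages or allocations skipped by page_alloc sampling).
 */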
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return false;

	if (!kasan_sample_page_alloc(order))
		return false;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	return true;
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

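/*
 * Poison a newly allocated slab as redzone; individual objects get
 * unpoisoned later, as they are handed out by the slab allocator.
 */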
void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

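/*
 * The two helpers below are used by the slab allocator to temporarily open
 * up an object (e.g. while its constructor runs) and to poison it again
 * afterwards.
 */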
void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
					const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

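/*
 * Common slab-free hook. Returns true if the slab allocator should skip
 * freeing the object: either the free was invalid (and has been reported),
 * or the object was placed into the KASAN quarantine.
 */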
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine, bool init)
{
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
		return false;

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (!kasan_arch_is_ready())
		return false;

	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_poison_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

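/*
 * Free hook for mempool elements, which may be backed either by a slab
 * object or, for large kmalloc() sizes, directly by the page allocator.
 */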
void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct folio *folio;

	folio = virt_to_folio(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
	} else {
		struct slab *slab = folio_slab(folio);

		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
	}
}

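/*
 * Allocation hook for slab objects: assign a tag, unpoison the object, and
 * record the allocation stack for non-kmalloc() caches.
 */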
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, tagged_object, flags);

	return tagged_object;
}

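/*
 * Common helper for kmalloc() and krealloc(): poison the redzone between
 * the requested size and the end of the object and record the allocation.
 */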
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, (void *)object, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

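/*
 * Same as __kasan_kmalloc(), but for allocations that kmalloc() backs with
 * the page allocator: the redzone runs to the end of the allocated pages.
 */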
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_unpoison_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

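/*
 * Unpoison the (possibly grown) object and reapply the redzone via the
 * kmalloc hooks above, for both slab- and page-backed allocations.
 */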
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
}

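/*
 * Check whether a single byte is accessible; if not, report the bad access
 * and return false.
 */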
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}