kasan: optimize large kmalloc poisoning
mm/kasan/common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

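/* Save the current stack trace to the stack depot and return a handle to it. */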
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

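/* Record the current task's pid and stack trace in a KASAN track slot. */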
void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
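/* Re-enable bug reporting for the current task (pairs with kasan_disable_current()). */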
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

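/* Suppress bug reporting for the current task. */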
void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

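/* Unpoison (mark as accessible) the given memory range. */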
void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size);
}

#if CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}

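/*
 * Assign one random tag to all pages of the allocation (a no-op for the
 * generic mode) and unpoison the whole range. HighMem pages are not
 * tracked by KASAN and are skipped.
 */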
void __kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison(page_address(page), PAGE_SIZE << order);
}

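/* Poison freed page_alloc pages. HighMem pages are skipped. */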
void __kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

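/*
 * Reserve redzone space for KASAN metadata (alloc meta and, for the generic
 * mode, free meta) and choose the final object size with an optimally sized
 * redzone.
 */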
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
	 * KASAN. Currently this flag is used in two places:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

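/*
 * Mark the cache as a kmalloc cache. For kmalloc caches, alloc info is saved
 * in kasan_kmalloc() rather than in kasan_slab_alloc() (see set_alloc_info()).
 */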
void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

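/* Return the number of bytes of KASAN metadata stored in the cache's redzone. */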
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

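/* Return the object's alloc metadata, or NULL if the cache does not store it. */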
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

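/* Reset the page tags and poison a newly allocated slab page. */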
void __kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, cache->object_size, KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

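/*
 * Initialize the object's KASAN metadata at slab creation time and preassign
 * its tag (see assign_tag() with init == true).
 */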
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

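/*
 * Returns true if KASAN has taken over the object: an invalid free was
 * reported or the object was put into quarantine. In that case the slab
 * allocator must not free the object itself.
 */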
static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				unsigned long ip, bool quarantine)
{
	u8 tag;
	void *tagged_object;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	kasan_poison(object, cache->object_size, KASAN_KMALLOC_FREE);

	if (!kasan_stack_collection_enabled())
		return false;

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
		return false;

	kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return ____kasan_slab_free(cache, object, ip, true);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		____kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
				gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}

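/*
 * Common helper for kasan_kmalloc() and kasan_krealloc(): poison the redzone
 * beyond the requested size and record alloc info for the object.
 */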
static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by ksize() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)object + cache->object_size;
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			KASAN_KMALLOC_REDZONE);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_alloc_pages() for
	 * alloc_pages() or by ksize() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

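/*
 * krealloc() hook: repoison the object for its new size, handling both
 * slab-backed objects and page_alloc-backed (large kmalloc) objects.
 */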
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by kasan_free_pages(). */
}

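/*
 * Check that a single byte at the address is accessible; if not, report a
 * KASAN bug and return false.
 */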
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}