/* lib/test_kasan.c — KASAN KUnit test suite (snapshot after commit "kasan: clean up comments in tests"). */
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

/* Offset past the accessible granule for the tag-based modes; 0 for generic. */
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
adb72ae1 30/*
0fd37925
AK
31 * Some tests use these global variables to store return values from function
32 * calls that could otherwise be eliminated by the compiler as dead code.
adb72ae1 33 */
adb72ae1 34void *kasan_ptr_result;
83c4e7a0
PA
35int kasan_int_result;
36
37static struct kunit_resource resource;
38static struct kunit_kasan_expectation fail_data;
39static bool multishot;
40
0fd37925
AK
41/*
42 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
43 * first detected bug and panic the kernel if panic_on_warn is enabled.
44 */
83c4e7a0
PA
45static int kasan_test_init(struct kunit *test)
46{
83c4e7a0 47 multishot = kasan_save_enable_multi_shot();
83c4e7a0
PA
48 return 0;
49}
50
51static void kasan_test_exit(struct kunit *test)
52{
53 kasan_restore_multi_shot(multishot);
54}
55
/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {		\
	fail_data.report_expected = true;			\
	fail_data.report_found = false;				\
	kunit_add_named_resource(test,				\
				NULL,				\
				NULL,				\
				&resource,			\
				"kasan_data", &fail_data);	\
	expression;						\
	KUNIT_EXPECT_EQ(test,					\
			fail_data.report_expected,		\
			fail_data.report_found);		\
} while (0)
73228c7e 76static void kmalloc_oob_right(struct kunit *test)
3f15801c
AR
77{
78 char *ptr;
79 size_t size = 123;
80
3f15801c 81 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 82 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
f33a0149 83
73228c7e 84 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
3f15801c
AR
85 kfree(ptr);
86}
87
73228c7e 88static void kmalloc_oob_left(struct kunit *test)
3f15801c
AR
89{
90 char *ptr;
91 size_t size = 15;
92
3f15801c 93 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 94 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
3f15801c 95
73228c7e 96 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
3f15801c
AR
97 kfree(ptr);
98}
99
73228c7e 100static void kmalloc_node_oob_right(struct kunit *test)
3f15801c
AR
101{
102 char *ptr;
103 size_t size = 4096;
104
3f15801c 105 ptr = kmalloc_node(size, GFP_KERNEL, 0);
73228c7e 106 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
3f15801c 107
73228c7e 108 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
3f15801c
AR
109 kfree(ptr);
110}
111
73228c7e 112static void kmalloc_pagealloc_oob_right(struct kunit *test)
3f15801c
AR
113{
114 char *ptr;
115 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
116
73228c7e
PA
117 if (!IS_ENABLED(CONFIG_SLUB)) {
118 kunit_info(test, "CONFIG_SLUB is not enabled.");
119 return;
120 }
121
0fd37925
AK
122 /*
123 * Allocate a chunk that does not fit into a SLUB cache to trigger
e6e8379c
AP
124 * the page allocator fallback.
125 */
e6e8379c 126 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 127 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
f33a0149 128
73228c7e 129 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
e6e8379c
AP
130 kfree(ptr);
131}
47adccce 132
73228c7e 133static void kmalloc_pagealloc_uaf(struct kunit *test)
47adccce
DV
134{
135 char *ptr;
136 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
137
73228c7e
PA
138 if (!IS_ENABLED(CONFIG_SLUB)) {
139 kunit_info(test, "CONFIG_SLUB is not enabled.");
47adccce
DV
140 return;
141 }
142
73228c7e
PA
143 ptr = kmalloc(size, GFP_KERNEL);
144 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
145
47adccce 146 kfree(ptr);
73228c7e 147 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
47adccce
DV
148}
149
73228c7e 150static void kmalloc_pagealloc_invalid_free(struct kunit *test)
47adccce
DV
151{
152 char *ptr;
153 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
154
73228c7e
PA
155 if (!IS_ENABLED(CONFIG_SLUB)) {
156 kunit_info(test, "CONFIG_SLUB is not enabled.");
47adccce
DV
157 return;
158 }
159
73228c7e
PA
160 ptr = kmalloc(size, GFP_KERNEL);
161 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
162
163 KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
47adccce 164}
e6e8379c 165
73228c7e 166static void kmalloc_large_oob_right(struct kunit *test)
e6e8379c
AP
167{
168 char *ptr;
169 size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
0fd37925
AK
170
171 /*
172 * Allocate a chunk that is large enough, but still fits into a slab
e6e8379c
AP
173 * and does not trigger the page allocator fallback in SLUB.
174 */
3f15801c 175 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 176 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
3f15801c 177
73228c7e 178 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
3f15801c
AR
179 kfree(ptr);
180}
181
73228c7e 182static void kmalloc_oob_krealloc_more(struct kunit *test)
3f15801c
AR
183{
184 char *ptr1, *ptr2;
185 size_t size1 = 17;
186 size_t size2 = 19;
187
3f15801c 188 ptr1 = kmalloc(size1, GFP_KERNEL);
73228c7e 189 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
3f15801c 190
73228c7e
PA
191 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
192 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
f33a0149 193
73228c7e 194 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
3f15801c
AR
195 kfree(ptr2);
196}
197
73228c7e 198static void kmalloc_oob_krealloc_less(struct kunit *test)
3f15801c
AR
199{
200 char *ptr1, *ptr2;
201 size_t size1 = 17;
202 size_t size2 = 15;
203
3f15801c 204 ptr1 = kmalloc(size1, GFP_KERNEL);
73228c7e 205 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
f33a0149 206
73228c7e
PA
207 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
208 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
f33a0149 209
73228c7e 210 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
3f15801c
AR
211 kfree(ptr2);
212}
213
73228c7e 214static void kmalloc_oob_16(struct kunit *test)
3f15801c
AR
215{
216 struct {
217 u64 words[2];
218 } *ptr1, *ptr2;
219
58b999d7
AK
220 /* This test is specifically crafted for the generic mode. */
221 if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
222 kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
223 return;
224 }
225
3f15801c 226 ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
73228c7e
PA
227 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
228
3f15801c 229 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
73228c7e
PA
230 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
231
232 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
3f15801c
AR
233 kfree(ptr1);
234 kfree(ptr2);
235}
236
58b999d7
AK
237static void kmalloc_uaf_16(struct kunit *test)
238{
239 struct {
240 u64 words[2];
241 } *ptr1, *ptr2;
242
243 ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
244 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
245
246 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
247 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
248 kfree(ptr2);
249
250 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
251 kfree(ptr1);
252}
253
73228c7e 254static void kmalloc_oob_memset_2(struct kunit *test)
f523e737
WL
255{
256 char *ptr;
257 size_t size = 8;
258
f523e737 259 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 260 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
f33a0149 261
73228c7e 262 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
f523e737
WL
263 kfree(ptr);
264}
265
73228c7e 266static void kmalloc_oob_memset_4(struct kunit *test)
f523e737
WL
267{
268 char *ptr;
269 size_t size = 8;
270
f523e737 271 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 272 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
f33a0149 273
73228c7e 274 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
f523e737
WL
275 kfree(ptr);
276}
277
278
73228c7e 279static void kmalloc_oob_memset_8(struct kunit *test)
f523e737
WL
280{
281 char *ptr;
282 size_t size = 8;
283
f523e737 284 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 285 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
f33a0149 286
73228c7e 287 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
f523e737
WL
288 kfree(ptr);
289}
290
73228c7e 291static void kmalloc_oob_memset_16(struct kunit *test)
f523e737
WL
292{
293 char *ptr;
294 size_t size = 16;
295
f523e737 296 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 297 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
f33a0149 298
73228c7e 299 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
f523e737
WL
300 kfree(ptr);
301}
302
73228c7e 303static void kmalloc_oob_in_memset(struct kunit *test)
3f15801c
AR
304{
305 char *ptr;
306 size_t size = 666;
307
3f15801c 308 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 309 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
f33a0149 310
73228c7e 311 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
3f15801c
AR
312 kfree(ptr);
313}
314
73228c7e 315static void kmalloc_memmove_invalid_size(struct kunit *test)
98f3b56f
WW
316{
317 char *ptr;
318 size_t size = 64;
319 volatile size_t invalid_size = -2;
320
98f3b56f 321 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 322 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
98f3b56f
WW
323
324 memset((char *)ptr, 0, 64);
73228c7e
PA
325
326 KUNIT_EXPECT_KASAN_FAIL(test,
327 memmove((char *)ptr, (char *)ptr + 4, invalid_size));
98f3b56f
WW
328 kfree(ptr);
329}
330
73228c7e 331static void kmalloc_uaf(struct kunit *test)
3f15801c
AR
332{
333 char *ptr;
334 size_t size = 10;
335
3f15801c 336 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 337 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
3f15801c
AR
338
339 kfree(ptr);
73228c7e 340 KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
3f15801c
AR
341}
342
73228c7e 343static void kmalloc_uaf_memset(struct kunit *test)
3f15801c
AR
344{
345 char *ptr;
346 size_t size = 33;
347
3f15801c 348 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 349 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
3f15801c
AR
350
351 kfree(ptr);
73228c7e 352 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
3f15801c
AR
353}
354
73228c7e 355static void kmalloc_uaf2(struct kunit *test)
3f15801c
AR
356{
357 char *ptr1, *ptr2;
358 size_t size = 43;
359
3f15801c 360 ptr1 = kmalloc(size, GFP_KERNEL);
73228c7e 361 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
3f15801c
AR
362
363 kfree(ptr1);
73228c7e 364
3f15801c 365 ptr2 = kmalloc(size, GFP_KERNEL);
73228c7e
PA
366 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
367
368 KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
369 KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
3f15801c 370
3f15801c
AR
371 kfree(ptr2);
372}
373
73228c7e 374static void kfree_via_page(struct kunit *test)
b92a953c
MR
375{
376 char *ptr;
377 size_t size = 8;
378 struct page *page;
379 unsigned long offset;
380
b92a953c 381 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 382 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
b92a953c
MR
383
384 page = virt_to_page(ptr);
385 offset = offset_in_page(ptr);
386 kfree(page_address(page) + offset);
387}
388
73228c7e 389static void kfree_via_phys(struct kunit *test)
b92a953c
MR
390{
391 char *ptr;
392 size_t size = 8;
393 phys_addr_t phys;
394
b92a953c 395 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 396 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
b92a953c
MR
397
398 phys = virt_to_phys(ptr);
399 kfree(phys_to_virt(phys));
400}
401
73228c7e 402static void kmem_cache_oob(struct kunit *test)
3f15801c
AR
403{
404 char *p;
405 size_t size = 200;
406 struct kmem_cache *cache = kmem_cache_create("test_cache",
407 size, 0,
408 0, NULL);
73228c7e 409 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
3f15801c
AR
410 p = kmem_cache_alloc(cache, GFP_KERNEL);
411 if (!p) {
73228c7e 412 kunit_err(test, "Allocation failed: %s\n", __func__);
3f15801c
AR
413 kmem_cache_destroy(cache);
414 return;
415 }
416
73228c7e 417 KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
3f15801c
AR
418 kmem_cache_free(cache, p);
419 kmem_cache_destroy(cache);
420}
421
73228c7e 422static void memcg_accounted_kmem_cache(struct kunit *test)
0386bf38
GT
423{
424 int i;
425 char *p;
426 size_t size = 200;
427 struct kmem_cache *cache;
428
429 cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
73228c7e 430 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
0386bf38 431
0386bf38
GT
432 /*
433 * Several allocations with a delay to allow for lazy per memcg kmem
434 * cache creation.
435 */
436 for (i = 0; i < 5; i++) {
437 p = kmem_cache_alloc(cache, GFP_KERNEL);
dc2bf000 438 if (!p)
0386bf38 439 goto free_cache;
dc2bf000 440
0386bf38
GT
441 kmem_cache_free(cache, p);
442 msleep(100);
443 }
444
445free_cache:
446 kmem_cache_destroy(cache);
447}
448
3f15801c
AR
449static char global_array[10];
450
73228c7e 451static void kasan_global_oob(struct kunit *test)
3f15801c
AR
452{
453 volatile int i = 3;
454 char *p = &global_array[ARRAY_SIZE(global_array) + i];
455
58b999d7
AK
456 /* Only generic mode instruments globals. */
457 if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
458 kunit_info(test, "CONFIG_KASAN_GENERIC required");
459 return;
460 }
461
73228c7e 462 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
3f15801c
AR
463}
464
73228c7e 465static void ksize_unpoisons_memory(struct kunit *test)
96fe805f
AP
466{
467 char *ptr;
48c23239 468 size_t size = 123, real_size;
96fe805f 469
96fe805f 470 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 471 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
96fe805f 472 real_size = ksize(ptr);
0fd37925
AK
473
474 /* This access shouldn't trigger a KASAN report. */
96fe805f 475 ptr[size] = 'x';
0fd37925
AK
476
477 /* This one must. */
73228c7e 478 KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');
0fd37925 479
96fe805f
AP
480 kfree(ptr);
481}
482
73228c7e 483static void kasan_stack_oob(struct kunit *test)
eae08dca 484{
73228c7e
PA
485 char stack_array[10];
486 volatile int i = OOB_TAG_OFF;
487 char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
eae08dca 488
73228c7e
PA
489 if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
490 kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
eae08dca
AR
491 return;
492 }
493
73228c7e 494 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
eae08dca
AR
495}
496
73228c7e 497static void kasan_alloca_oob_left(struct kunit *test)
00a14294
PL
498{
499 volatile int i = 10;
500 char alloca_array[i];
501 char *p = alloca_array - 1;
502
58b999d7
AK
503 /* Only generic mode instruments dynamic allocas. */
504 if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
505 kunit_info(test, "CONFIG_KASAN_GENERIC required");
506 return;
507 }
508
73228c7e
PA
509 if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
510 kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
511 return;
512 }
513
514 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
00a14294
PL
515}
516
73228c7e 517static void kasan_alloca_oob_right(struct kunit *test)
00a14294
PL
518{
519 volatile int i = 10;
520 char alloca_array[i];
521 char *p = alloca_array + i;
522
58b999d7
AK
523 /* Only generic mode instruments dynamic allocas. */
524 if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
525 kunit_info(test, "CONFIG_KASAN_GENERIC required");
526 return;
527 }
528
73228c7e
PA
529 if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
530 kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
531 return;
532 }
533
534 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
00a14294
PL
535}
536
73228c7e 537static void kmem_cache_double_free(struct kunit *test)
b1d57289
DV
538{
539 char *p;
540 size_t size = 200;
541 struct kmem_cache *cache;
542
543 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
73228c7e
PA
544 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
545
b1d57289
DV
546 p = kmem_cache_alloc(cache, GFP_KERNEL);
547 if (!p) {
73228c7e 548 kunit_err(test, "Allocation failed: %s\n", __func__);
b1d57289
DV
549 kmem_cache_destroy(cache);
550 return;
551 }
552
553 kmem_cache_free(cache, p);
73228c7e 554 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
b1d57289
DV
555 kmem_cache_destroy(cache);
556}
557
73228c7e 558static void kmem_cache_invalid_free(struct kunit *test)
b1d57289
DV
559{
560 char *p;
561 size_t size = 200;
562 struct kmem_cache *cache;
563
564 cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
565 NULL);
73228c7e
PA
566 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
567
b1d57289
DV
568 p = kmem_cache_alloc(cache, GFP_KERNEL);
569 if (!p) {
73228c7e 570 kunit_err(test, "Allocation failed: %s\n", __func__);
b1d57289
DV
571 kmem_cache_destroy(cache);
572 return;
573 }
574
0fd37925 575 /* Trigger invalid free, the object doesn't get freed. */
73228c7e 576 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
91c93ed0
AK
577
578 /*
579 * Properly free the object to prevent the "Objects remaining in
580 * test_cache on __kmem_cache_shutdown" BUG failure.
581 */
582 kmem_cache_free(cache, p);
583
b1d57289
DV
584 kmem_cache_destroy(cache);
585}
586
73228c7e 587static void kasan_memchr(struct kunit *test)
0c96350a
AR
588{
589 char *ptr;
590 size_t size = 24;
591
0fd37925
AK
592 /*
593 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
594 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
595 */
73228c7e
PA
596 if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
597 kunit_info(test,
598 "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
0c96350a 599 return;
73228c7e
PA
600 }
601
58b999d7
AK
602 if (OOB_TAG_OFF)
603 size = round_up(size, OOB_TAG_OFF);
604
73228c7e
PA
605 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
606 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
607
608 KUNIT_EXPECT_KASAN_FAIL(test,
609 kasan_ptr_result = memchr(ptr, '1', size + 1));
0c96350a 610
0c96350a
AR
611 kfree(ptr);
612}
613
73228c7e 614static void kasan_memcmp(struct kunit *test)
0c96350a
AR
615{
616 char *ptr;
617 size_t size = 24;
618 int arr[9];
619
0fd37925
AK
620 /*
621 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
622 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
623 */
73228c7e
PA
624 if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
625 kunit_info(test,
626 "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
0c96350a 627 return;
73228c7e 628 }
0c96350a 629
58b999d7
AK
630 if (OOB_TAG_OFF)
631 size = round_up(size, OOB_TAG_OFF);
632
73228c7e
PA
633 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
634 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
0c96350a 635 memset(arr, 0, sizeof(arr));
73228c7e
PA
636
637 KUNIT_EXPECT_KASAN_FAIL(test,
638 kasan_int_result = memcmp(ptr, arr, size+1));
0c96350a
AR
639 kfree(ptr);
640}
641
73228c7e 642static void kasan_strings(struct kunit *test)
0c96350a
AR
643{
644 char *ptr;
645 size_t size = 24;
646
0fd37925
AK
647 /*
648 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
649 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
650 */
73228c7e
PA
651 if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
652 kunit_info(test,
653 "str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
0c96350a 654 return;
73228c7e
PA
655 }
656
657 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
658 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
0c96350a
AR
659
660 kfree(ptr);
661
662 /*
663 * Try to cause only 1 invalid access (less spam in dmesg).
664 * For that we need ptr to point to zeroed byte.
665 * Skip metadata that could be stored in freed object so ptr
666 * will likely point to zeroed byte.
667 */
668 ptr += 16;
73228c7e 669 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
0c96350a 670
73228c7e 671 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
0c96350a 672
73228c7e 673 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
0c96350a 674
73228c7e 675 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
0c96350a 676
73228c7e 677 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
0c96350a 678
73228c7e 679 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
0c96350a
AR
680}
681
/* Helper: every modifying bitop on an out-of-bounds bit must be reported. */
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
694static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
695{
696 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
697 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
698 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
699 KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
700 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
701 KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
702 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
703 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
704
705#if defined(clear_bit_unlock_is_negative_byte)
706 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
707 clear_bit_unlock_is_negative_byte(nr, addr));
708#endif
709}
710
711static void kasan_bitops_generic(struct kunit *test)
19a33ca6 712{
58b999d7
AK
713 long *bits;
714
715 /* This test is specifically crafted for the generic mode. */
716 if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
717 kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
718 return;
719 }
720
19a33ca6 721 /*
0fd37925 722 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
19a33ca6
ME
723 * this way we do not actually corrupt other memory.
724 */
58b999d7 725 bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
73228c7e 726 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
19a33ca6
ME
727
728 /*
729 * Below calls try to access bit within allocated memory; however, the
730 * below accesses are still out-of-bounds, since bitops are defined to
731 * operate on the whole long the bit is in.
732 */
58b999d7 733 kasan_bitops_modify(test, BITS_PER_LONG, bits);
19a33ca6
ME
734
735 /*
736 * Below calls try to access bit beyond allocated memory.
737 */
58b999d7 738 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
19a33ca6 739
58b999d7
AK
740 kfree(bits);
741}
19a33ca6 742
58b999d7
AK
743static void kasan_bitops_tags(struct kunit *test)
744{
745 long *bits;
19a33ca6 746
58b999d7
AK
747 /* This test is specifically crafted for the tag-based mode. */
748 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
749 kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
750 return;
751 }
19a33ca6 752
58b999d7
AK
753 /* Allocation size will be rounded to up granule size, which is 16. */
754 bits = kzalloc(sizeof(*bits), GFP_KERNEL);
755 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
19a33ca6 756
58b999d7
AK
757 /* Do the accesses past the 16 allocated bytes. */
758 kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
759 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
19a33ca6 760
19a33ca6
ME
761 kfree(bits);
762}
763
73228c7e 764static void kmalloc_double_kzfree(struct kunit *test)
bb104ed7
ME
765{
766 char *ptr;
767 size_t size = 16;
768
bb104ed7 769 ptr = kmalloc(size, GFP_KERNEL);
73228c7e 770 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
bb104ed7 771
453431a5 772 kfree_sensitive(ptr);
73228c7e 773 KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
bb104ed7
ME
774}
775
73228c7e 776static void vmalloc_oob(struct kunit *test)
06513916
DA
777{
778 void *area;
779
73228c7e
PA
780 if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
781 kunit_info(test, "CONFIG_KASAN_VMALLOC is not enabled.");
782 return;
783 }
06513916
DA
784
785 /*
786 * We have to be careful not to hit the guard page.
787 * The MMU will catch that and crash us.
788 */
789 area = vmalloc(3000);
73228c7e 790 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);
06513916 791
73228c7e 792 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
06513916
DA
793 vfree(area);
794}
387d6e46 795
73228c7e
PA
796static struct kunit_case kasan_kunit_test_cases[] = {
797 KUNIT_CASE(kmalloc_oob_right),
798 KUNIT_CASE(kmalloc_oob_left),
799 KUNIT_CASE(kmalloc_node_oob_right),
800 KUNIT_CASE(kmalloc_pagealloc_oob_right),
801 KUNIT_CASE(kmalloc_pagealloc_uaf),
802 KUNIT_CASE(kmalloc_pagealloc_invalid_free),
803 KUNIT_CASE(kmalloc_large_oob_right),
804 KUNIT_CASE(kmalloc_oob_krealloc_more),
805 KUNIT_CASE(kmalloc_oob_krealloc_less),
806 KUNIT_CASE(kmalloc_oob_16),
58b999d7 807 KUNIT_CASE(kmalloc_uaf_16),
73228c7e
PA
808 KUNIT_CASE(kmalloc_oob_in_memset),
809 KUNIT_CASE(kmalloc_oob_memset_2),
810 KUNIT_CASE(kmalloc_oob_memset_4),
811 KUNIT_CASE(kmalloc_oob_memset_8),
812 KUNIT_CASE(kmalloc_oob_memset_16),
813 KUNIT_CASE(kmalloc_memmove_invalid_size),
814 KUNIT_CASE(kmalloc_uaf),
815 KUNIT_CASE(kmalloc_uaf_memset),
816 KUNIT_CASE(kmalloc_uaf2),
817 KUNIT_CASE(kfree_via_page),
818 KUNIT_CASE(kfree_via_phys),
819 KUNIT_CASE(kmem_cache_oob),
820 KUNIT_CASE(memcg_accounted_kmem_cache),
821 KUNIT_CASE(kasan_global_oob),
822 KUNIT_CASE(kasan_stack_oob),
823 KUNIT_CASE(kasan_alloca_oob_left),
824 KUNIT_CASE(kasan_alloca_oob_right),
825 KUNIT_CASE(ksize_unpoisons_memory),
826 KUNIT_CASE(kmem_cache_double_free),
827 KUNIT_CASE(kmem_cache_invalid_free),
828 KUNIT_CASE(kasan_memchr),
829 KUNIT_CASE(kasan_memcmp),
830 KUNIT_CASE(kasan_strings),
58b999d7
AK
831 KUNIT_CASE(kasan_bitops_generic),
832 KUNIT_CASE(kasan_bitops_tags),
73228c7e
PA
833 KUNIT_CASE(kmalloc_double_kzfree),
834 KUNIT_CASE(vmalloc_oob),
835 {}
836};
837
838static struct kunit_suite kasan_kunit_test_suite = {
839 .name = "kasan",
840 .init = kasan_test_init,
841 .test_cases = kasan_kunit_test_cases,
842 .exit = kasan_test_exit,
843};
844
845kunit_test_suite(kasan_kunit_test_suite);
3f15801c 846
3f15801c 847MODULE_LICENSE("GPL");