kasan: test: avoid corrupting memory via memset
[linux-2.6-block.git] lib/test_kasan.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
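
/*
 * For the tag-based modes, the accessible part of an object is rounded up
 * to KASAN_GRANULE_SIZE, so an access just past the requested size can
 * still hit validly tagged memory. Adding OOB_TAG_OFF moves such accesses
 * into the next granule, which is guaranteed to be inaccessible.
 */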

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, also allow tag checking to be reenabled for each
 * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	kasan_set_tagging_report_once(false);
	fail_data.report_found = false;
	kunit_add_named_resource(test, NULL, NULL, &resource,
					"kasan_data", &fail_data);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
	KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can be only disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, fail_data.report_found is kept as
 * false. This allows detecting KASAN reports that happen outside of the checks
 * by asserting !fail_data.report_found at the start of KUNIT_EXPECT_KASAN_FAIL
 * and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(fail_data.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (!READ_ONCE(fail_data.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				"\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {				\
		if (READ_ONCE(fail_data.report_found))			\
			kasan_enable_tagging_sync();			\
		migrate_enable();					\
	}								\
	WRITE_ONCE(fail_data.report_found, false);			\
} while (0)
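
/*
 * Typical usage, as in the tests below: perform a single bad access inside
 * the checked expression and expect it to produce exactly one KASAN report:
 *
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
 */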

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)
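
/*
 * kunit_skip() marks the current test as skipped and aborts it, so tests
 * that depend on a specific KASAN mode or kernel config show up as skipped
 * rather than failed, e.g.:
 *
 *	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
 */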

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
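
	/*
	 * Worked example for the generic mode, where KASAN_GRANULE_SIZE is 8:
	 * size is 115, so ptr[115] below is an unaligned access just past the
	 * requested size, ptr[120] is granule-aligned but still within the
	 * 128-byte slot, and ptr[128] is past the slot entirely.
	 */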

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN, page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
			ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
			ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules, and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}
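
/*
 * With the sizes used by krealloc_less_oob() below (size1 = 235, size2 = 201),
 * middle is 218: for both 8- and 16-byte granules, offsets 201, 218, and 235
 * land in distinct granules, which the KUNIT_EXPECT_LE checks in the helper
 * verify.
 */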

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation does not only
 * check the starting address but the whole range.
 */

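/*
 * For example, with size = 128 - KASAN_GRANULE_SIZE, the two-byte memset in
 * kmalloc_oob_memset_2() starts at ptr + size - 1: its first byte is
 * in-bounds, while its second byte crosses past the end of the accessible
 * object.
 */
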
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;
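
	/*
	 * invalid_size is -2 converted to size_t, i.e. an enormous length;
	 * the volatile qualifier hides the constant from the compiler, so
	 * the bogus memmove() below survives optimization.
	 */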

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);

	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN, the ptr1 and ptr2 tags might happen to be the
	 * same. Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

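	/*
	 * For the tag-based modes, round size up to the granule size so that
	 * the last byte read by memchr() below (the one at offset size) lands
	 * in an inaccessible granule.
	 */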
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
			kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
			kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to a zeroed byte.
	 * Skip the metadata that could be stored in the freed object so that
	 * ptr will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
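
	/*
	 * On 64-bit, sizeof(*bits) + 1 is 9 bytes, which the allocator rounds
	 * up to a 16-byte slot, so the out-of-bounds bitops below stay within
	 * the slab object and do not corrupt neighboring memory.
	 */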

	/*
	 * The calls below try to access a bit within the allocated memory;
	 * however, they are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * The calls below try to access a bit beyond the allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Back up the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");