Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
3f15801c AR |
2 | /* |
3 | * | |
4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | |
5 | * Author: Andrey Ryabinin <a.ryabinin@samsung.com> | |
3f15801c AR |
6 | */ |
7 | ||
19a33ca6 | 8 | #include <linux/bitops.h> |
0386bf38 | 9 | #include <linux/delay.h> |
19a33ca6 | 10 | #include <linux/kasan.h> |
3f15801c | 11 | #include <linux/kernel.h> |
eae08dca | 12 | #include <linux/mm.h> |
19a33ca6 ME |
13 | #include <linux/mman.h> |
14 | #include <linux/module.h> | |
3f15801c | 15 | #include <linux/printk.h> |
573a4809 | 16 | #include <linux/random.h> |
3f15801c AR |
17 | #include <linux/slab.h> |
18 | #include <linux/string.h> | |
eae08dca | 19 | #include <linux/uaccess.h> |
b92a953c | 20 | #include <linux/io.h> |
06513916 | 21 | #include <linux/vmalloc.h> |
1a2473f0 | 22 | #include <linux/set_memory.h> |
b92a953c MR |
23 | |
24 | #include <asm/page.h> | |
3f15801c | 25 | |
83c4e7a0 PA |
26 | #include <kunit/test.h> |
27 | ||
f33a0149 WW |
28 | #include "../mm/kasan/kasan.h" |
29 | ||
1f600626 | 30 | #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE) |
f33a0149 | 31 | |
adb72ae1 | 32 | /* |
0fd37925 AK |
33 | * Some tests use these global variables to store return values from function |
34 | * calls that could otherwise be eliminated by the compiler as dead code. | |
adb72ae1 | 35 | */ |
adb72ae1 | 36 | void *kasan_ptr_result; |
83c4e7a0 PA |
37 | int kasan_int_result; |
38 | ||
39 | static struct kunit_resource resource; | |
ed6d7444 | 40 | static struct kunit_kasan_status test_status; |
83c4e7a0 PA |
41 | static bool multishot; |
42 | ||
0fd37925 AK |
43 | /* |
44 | * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the | |
f05842cf AK |
45 | * first detected bug and panic the kernel if panic_on_warn is enabled. For |
46 | * hardware tag-based KASAN also allow tag checking to be reenabled for each | |
47 | * test, see the comment for KUNIT_EXPECT_KASAN_FAIL(). | |
0fd37925 | 48 | */ |
83c4e7a0 PA |
49 | static int kasan_test_init(struct kunit *test) |
50 | { | |
d82dc3a4 AK |
51 | if (!kasan_enabled()) { |
52 | kunit_err(test, "can't run KASAN tests with KASAN disabled"); | |
53 | return -1; | |
54 | } | |
55 | ||
83c4e7a0 | 56 | multishot = kasan_save_enable_multi_shot(); |
ed6d7444 AK |
57 | test_status.report_found = false; |
58 | test_status.sync_fault = false; | |
99734b53 | 59 | kunit_add_named_resource(test, NULL, NULL, &resource, |
ed6d7444 | 60 | "kasan_status", &test_status); |
83c4e7a0 PA |
61 | return 0; |
62 | } | |
63 | ||
64 | static void kasan_test_exit(struct kunit *test) | |
65 | { | |
66 | kasan_restore_multi_shot(multishot); | |
ed6d7444 | 67 | KUNIT_EXPECT_FALSE(test, test_status.report_found); |
83c4e7a0 PA |
68 | } |
69 | ||
70 | /** | |
0fd37925 AK |
71 | * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a |
72 | * KASAN report; causes a test failure otherwise. This relies on a KUnit | |
ed6d7444 | 73 | * resource named "kasan_status". Do not use this name for KUnit resources |
0fd37925 | 74 | * outside of KASAN tests. |
f05842cf | 75 | * |
ed6d7444 | 76 | * For hardware tag-based KASAN, when a synchronous tag fault happens, tag |
e80a76aa AK |
77 | * checking is auto-disabled. When this happens, this test handler reenables |
78 | * tag checking. As tag checking can be only disabled or enabled per CPU, | |
79 | * this handler disables migration (preemption). | |
2e4bde6a | 80 | * |
ed6d7444 | 81 | * Since the compiler doesn't see that the expression can change the test_status |
2e4bde6a AK |
82 | * fields, it can reorder or optimize away the accesses to those fields. |
83 | * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the | |
84 | * expression to prevent that. | |
99734b53 | 85 | * |
ed6d7444 AK |
86 | * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept |
87 | * as false. This allows detecting KASAN reports that happen outside of the | |
88 | * checks by asserting !test_status.report_found at the start of | |
89 | * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit. | |
83c4e7a0 | 90 | */ |
99734b53 AK |
91 | #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \ |
92 | if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ | |
2d27e585 | 93 | kasan_sync_fault_possible()) \ |
99734b53 | 94 | migrate_disable(); \ |
ed6d7444 | 95 | KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \ |
99734b53 AK |
96 | barrier(); \ |
97 | expression; \ | |
98 | barrier(); \ | |
ed6d7444 AK |
99 | if (kasan_async_fault_possible()) \ |
100 | kasan_force_async_fault(); \ | |
101 | if (!READ_ONCE(test_status.report_found)) { \ | |
3ff16d30 DG |
102 | KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \ |
103 | "expected in \"" #expression \ | |
104 | "\", but none occurred"); \ | |
105 | } \ | |
ed6d7444 AK |
106 | if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ |
107 | kasan_sync_fault_possible()) { \ | |
108 | if (READ_ONCE(test_status.report_found) && \ | |
109 | READ_ONCE(test_status.sync_fault)) \ | |
110 | kasan_enable_tagging(); \ | |
99734b53 AK |
111 | migrate_enable(); \ |
112 | } \ | |
ed6d7444 | 113 | WRITE_ONCE(test_status.report_found, false); \ |
83c4e7a0 PA |
114 | } while (0) |
115 | ||
da17e377 | 116 | #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \ |
40eb5cf4 ME |
117 | if (!IS_ENABLED(config)) \ |
118 | kunit_skip((test), "Test requires " #config "=y"); \ | |
da17e377 AK |
119 | } while (0) |
120 | ||
121 | #define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \ | |
40eb5cf4 ME |
122 | if (IS_ENABLED(config)) \ |
123 | kunit_skip((test), "Test requires " #config "=n"); \ | |
da17e377 AK |
124 | } while (0) |
125 | ||
73228c7e | 126 | static void kmalloc_oob_right(struct kunit *test) |
3f15801c AR |
127 | { |
128 | char *ptr; | |
ab512805 | 129 | size_t size = 128 - KASAN_GRANULE_SIZE - 5; |
3f15801c | 130 | |
3f15801c | 131 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 132 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
f33a0149 | 133 | |
aaf50b19 | 134 | OPTIMIZER_HIDE_VAR(ptr); |
ab512805 AK |
135 | /* |
136 | * An unaligned access past the requested kmalloc size. | |
137 | * Only generic KASAN can precisely detect these. | |
138 | */ | |
139 | if (IS_ENABLED(CONFIG_KASAN_GENERIC)) | |
140 | KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x'); | |
141 | ||
142 | /* | |
143 | * An aligned access into the first out-of-bounds granule that falls | |
144 | * within the aligned kmalloc object. | |
145 | */ | |
146 | KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y'); | |
147 | ||
148 | /* Out-of-bounds access past the aligned kmalloc object. */ | |
149 | KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = | |
150 | ptr[size + KASAN_GRANULE_SIZE + 5]); | |
151 | ||
3f15801c AR |
152 | kfree(ptr); |
153 | } | |
154 | ||
73228c7e | 155 | static void kmalloc_oob_left(struct kunit *test) |
3f15801c AR |
156 | { |
157 | char *ptr; | |
158 | size_t size = 15; | |
159 | ||
3f15801c | 160 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 161 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
3f15801c | 162 | |
aaf50b19 | 163 | OPTIMIZER_HIDE_VAR(ptr); |
73228c7e | 164 | KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1)); |
3f15801c AR |
165 | kfree(ptr); |
166 | } | |
167 | ||
73228c7e | 168 | static void kmalloc_node_oob_right(struct kunit *test) |
3f15801c AR |
169 | { |
170 | char *ptr; | |
171 | size_t size = 4096; | |
172 | ||
3f15801c | 173 | ptr = kmalloc_node(size, GFP_KERNEL, 0); |
73228c7e | 174 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
3f15801c | 175 | |
aaf50b19 | 176 | OPTIMIZER_HIDE_VAR(ptr); |
8fbad19b | 177 | KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]); |
3f15801c AR |
178 | kfree(ptr); |
179 | } | |
180 | ||
858bdeb0 AK |
181 | /* |
182 | * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't | |
183 | * fit into a slab cache and therefore is allocated via the page allocator | |
184 | * fallback. Since this kind of fallback is only implemented for SLUB, these | |
185 | * tests are limited to that allocator. | |
186 | */ | |
73228c7e | 187 | static void kmalloc_pagealloc_oob_right(struct kunit *test) |
3f15801c AR |
188 | { |
189 | char *ptr; | |
190 | size_t size = KMALLOC_MAX_CACHE_SIZE + 10; | |
191 | ||
da17e377 | 192 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); |
73228c7e | 193 | |
e6e8379c | 194 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 195 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
f33a0149 | 196 | |
aaf50b19 | 197 | OPTIMIZER_HIDE_VAR(ptr); |
73228c7e | 198 | KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0); |
858bdeb0 | 199 | |
e6e8379c AP |
200 | kfree(ptr); |
201 | } | |
47adccce | 202 | |
73228c7e | 203 | static void kmalloc_pagealloc_uaf(struct kunit *test) |
47adccce DV |
204 | { |
205 | char *ptr; | |
206 | size_t size = KMALLOC_MAX_CACHE_SIZE + 10; | |
207 | ||
da17e377 | 208 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); |
47adccce | 209 | |
73228c7e PA |
210 | ptr = kmalloc(size, GFP_KERNEL); |
211 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
47adccce | 212 | kfree(ptr); |
858bdeb0 | 213 | |
8fbad19b | 214 | KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); |
47adccce DV |
215 | } |
216 | ||
73228c7e | 217 | static void kmalloc_pagealloc_invalid_free(struct kunit *test) |
47adccce DV |
218 | { |
219 | char *ptr; | |
220 | size_t size = KMALLOC_MAX_CACHE_SIZE + 10; | |
221 | ||
da17e377 | 222 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); |
47adccce | 223 | |
73228c7e PA |
224 | ptr = kmalloc(size, GFP_KERNEL); |
225 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
226 | ||
227 | KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1)); | |
47adccce | 228 | } |
e6e8379c | 229 | |
858bdeb0 AK |
230 | static void pagealloc_oob_right(struct kunit *test) |
231 | { | |
232 | char *ptr; | |
233 | struct page *pages; | |
234 | size_t order = 4; | |
235 | size_t size = (1UL << (PAGE_SHIFT + order)); | |
236 | ||
237 | /* | |
238 | * With generic KASAN page allocations have no redzones, thus | |
239 | * out-of-bounds detection is not guaranteed. | |
240 | * See https://bugzilla.kernel.org/show_bug.cgi?id=210503. | |
241 | */ | |
242 | KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); | |
243 | ||
244 | pages = alloc_pages(GFP_KERNEL, order); | |
245 | ptr = page_address(pages); | |
246 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
247 | ||
8fbad19b | 248 | KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]); |
858bdeb0 AK |
249 | free_pages((unsigned long)ptr, order); |
250 | } | |
251 | ||
252 | static void pagealloc_uaf(struct kunit *test) | |
253 | { | |
254 | char *ptr; | |
255 | struct page *pages; | |
256 | size_t order = 4; | |
257 | ||
258 | pages = alloc_pages(GFP_KERNEL, order); | |
259 | ptr = page_address(pages); | |
260 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
261 | free_pages((unsigned long)ptr, order); | |
262 | ||
8fbad19b | 263 | KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); |
858bdeb0 AK |
264 | } |
265 | ||
73228c7e | 266 | static void kmalloc_large_oob_right(struct kunit *test) |
e6e8379c AP |
267 | { |
268 | char *ptr; | |
269 | size_t size = KMALLOC_MAX_CACHE_SIZE - 256; | |
0fd37925 AK |
270 | |
271 | /* | |
272 | * Allocate a chunk that is large enough, but still fits into a slab | |
e6e8379c AP |
273 | * and does not trigger the page allocator fallback in SLUB. |
274 | */ | |
3f15801c | 275 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 276 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
3f15801c | 277 | |
aaf50b19 | 278 | OPTIMIZER_HIDE_VAR(ptr); |
73228c7e | 279 | KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0); |
3f15801c AR |
280 | kfree(ptr); |
281 | } | |
282 | ||
b87c28b9 AK |
283 | static void krealloc_more_oob_helper(struct kunit *test, |
284 | size_t size1, size_t size2) | |
3f15801c AR |
285 | { |
286 | char *ptr1, *ptr2; | |
b87c28b9 AK |
287 | size_t middle; |
288 | ||
289 | KUNIT_ASSERT_LT(test, size1, size2); | |
290 | middle = size1 + (size2 - size1) / 2; | |
3f15801c | 291 | |
3f15801c | 292 | ptr1 = kmalloc(size1, GFP_KERNEL); |
73228c7e | 293 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); |
3f15801c | 294 | |
73228c7e PA |
295 | ptr2 = krealloc(ptr1, size2, GFP_KERNEL); |
296 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); | |
f33a0149 | 297 | |
b87c28b9 AK |
298 | /* All offsets up to size2 must be accessible. */ |
299 | ptr2[size1 - 1] = 'x'; | |
300 | ptr2[size1] = 'x'; | |
301 | ptr2[middle] = 'x'; | |
302 | ptr2[size2 - 1] = 'x'; | |
303 | ||
304 | /* Generic mode is precise, so unaligned size2 must be inaccessible. */ | |
305 | if (IS_ENABLED(CONFIG_KASAN_GENERIC)) | |
306 | KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x'); | |
307 | ||
308 | /* For all modes first aligned offset after size2 must be inaccessible. */ | |
309 | KUNIT_EXPECT_KASAN_FAIL(test, | |
310 | ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x'); | |
311 | ||
3f15801c AR |
312 | kfree(ptr2); |
313 | } | |
314 | ||
b87c28b9 AK |
315 | static void krealloc_less_oob_helper(struct kunit *test, |
316 | size_t size1, size_t size2) | |
3f15801c AR |
317 | { |
318 | char *ptr1, *ptr2; | |
b87c28b9 AK |
319 | size_t middle; |
320 | ||
321 | KUNIT_ASSERT_LT(test, size2, size1); | |
322 | middle = size2 + (size1 - size2) / 2; | |
3f15801c | 323 | |
3f15801c | 324 | ptr1 = kmalloc(size1, GFP_KERNEL); |
73228c7e | 325 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); |
f33a0149 | 326 | |
73228c7e PA |
327 | ptr2 = krealloc(ptr1, size2, GFP_KERNEL); |
328 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); | |
f33a0149 | 329 | |
b87c28b9 AK |
330 | /* Must be accessible for all modes. */ |
331 | ptr2[size2 - 1] = 'x'; | |
332 | ||
333 | /* Generic mode is precise, so unaligned size2 must be inaccessible. */ | |
334 | if (IS_ENABLED(CONFIG_KASAN_GENERIC)) | |
335 | KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x'); | |
336 | ||
337 | /* For all modes first aligned offset after size2 must be inaccessible. */ | |
338 | KUNIT_EXPECT_KASAN_FAIL(test, | |
339 | ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x'); | |
340 | ||
341 | /* | |
342 | * For all modes all size2, middle, and size1 should land in separate | |
343 | * granules and thus the latter two offsets should be inaccessible. | |
344 | */ | |
345 | KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE), | |
346 | round_down(middle, KASAN_GRANULE_SIZE)); | |
347 | KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE), | |
348 | round_down(size1, KASAN_GRANULE_SIZE)); | |
349 | KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x'); | |
350 | KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x'); | |
351 | KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x'); | |
352 | ||
3f15801c AR |
353 | kfree(ptr2); |
354 | } | |
355 | ||
b87c28b9 AK |
356 | static void krealloc_more_oob(struct kunit *test) |
357 | { | |
358 | krealloc_more_oob_helper(test, 201, 235); | |
359 | } | |
360 | ||
361 | static void krealloc_less_oob(struct kunit *test) | |
362 | { | |
363 | krealloc_less_oob_helper(test, 235, 201); | |
364 | } | |
365 | ||
366 | static void krealloc_pagealloc_more_oob(struct kunit *test) | |
367 | { | |
368 | /* page_alloc fallback is only implemented for SLUB. */ | |
369 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); | |
370 | ||
371 | krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201, | |
372 | KMALLOC_MAX_CACHE_SIZE + 235); | |
373 | } | |
374 | ||
375 | static void krealloc_pagealloc_less_oob(struct kunit *test) | |
376 | { | |
377 | /* page_alloc fallback is only implemented for SLUB. */ | |
378 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); | |
379 | ||
380 | krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235, | |
381 | KMALLOC_MAX_CACHE_SIZE + 201); | |
382 | } | |
383 | ||
26a5ca7a AK |
384 | /* |
385 | * Check that krealloc() detects a use-after-free, returns NULL, | |
386 | * and doesn't unpoison the freed object. | |
387 | */ | |
388 | static void krealloc_uaf(struct kunit *test) | |
389 | { | |
390 | char *ptr1, *ptr2; | |
391 | int size1 = 201; | |
392 | int size2 = 235; | |
393 | ||
394 | ptr1 = kmalloc(size1, GFP_KERNEL); | |
395 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); | |
396 | kfree(ptr1); | |
397 | ||
398 | KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL)); | |
ccad78f1 | 399 | KUNIT_ASSERT_NULL(test, ptr2); |
26a5ca7a AK |
400 | KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1); |
401 | } | |
402 | ||
73228c7e | 403 | static void kmalloc_oob_16(struct kunit *test) |
3f15801c AR |
404 | { |
405 | struct { | |
406 | u64 words[2]; | |
407 | } *ptr1, *ptr2; | |
408 | ||
58b999d7 | 409 | /* This test is specifically crafted for the generic mode. */ |
da17e377 | 410 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); |
58b999d7 | 411 | |
3f15801c | 412 | ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); |
73228c7e PA |
413 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); |
414 | ||
3f15801c | 415 | ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); |
73228c7e PA |
416 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); |
417 | ||
aaf50b19 KC |
418 | OPTIMIZER_HIDE_VAR(ptr1); |
419 | OPTIMIZER_HIDE_VAR(ptr2); | |
73228c7e | 420 | KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2); |
3f15801c AR |
421 | kfree(ptr1); |
422 | kfree(ptr2); | |
423 | } | |
424 | ||
58b999d7 AK |
425 | static void kmalloc_uaf_16(struct kunit *test) |
426 | { | |
427 | struct { | |
428 | u64 words[2]; | |
429 | } *ptr1, *ptr2; | |
430 | ||
431 | ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL); | |
432 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); | |
433 | ||
434 | ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); | |
435 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); | |
436 | kfree(ptr2); | |
437 | ||
438 | KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2); | |
439 | kfree(ptr1); | |
440 | } | |
441 | ||
555999a0 AK |
442 | /* |
443 | * Note: in the memset tests below, the written range touches both valid and | |
444 | * invalid memory. This makes sure that the instrumentation does not only check | |
445 | * the starting address but the whole range. | |
446 | */ | |
447 | ||
73228c7e | 448 | static void kmalloc_oob_memset_2(struct kunit *test) |
f523e737 WL |
449 | { |
450 | char *ptr; | |
555999a0 | 451 | size_t size = 128 - KASAN_GRANULE_SIZE; |
f523e737 | 452 | |
f523e737 | 453 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 454 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
f33a0149 | 455 | |
d73dad4e | 456 | OPTIMIZER_HIDE_VAR(size); |
555999a0 | 457 | KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2)); |
f523e737 WL |
458 | kfree(ptr); |
459 | } | |
460 | ||
73228c7e | 461 | static void kmalloc_oob_memset_4(struct kunit *test) |
f523e737 WL |
462 | { |
463 | char *ptr; | |
555999a0 | 464 | size_t size = 128 - KASAN_GRANULE_SIZE; |
f523e737 | 465 | |
f523e737 | 466 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 467 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
f33a0149 | 468 | |
d73dad4e | 469 | OPTIMIZER_HIDE_VAR(size); |
555999a0 | 470 | KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4)); |
f523e737 WL |
471 | kfree(ptr); |
472 | } | |
473 | ||
73228c7e | 474 | static void kmalloc_oob_memset_8(struct kunit *test) |
f523e737 WL |
475 | { |
476 | char *ptr; | |
555999a0 | 477 | size_t size = 128 - KASAN_GRANULE_SIZE; |
f523e737 | 478 | |
f523e737 | 479 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 480 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
f33a0149 | 481 | |
d73dad4e | 482 | OPTIMIZER_HIDE_VAR(size); |
555999a0 | 483 | KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8)); |
f523e737 WL |
484 | kfree(ptr); |
485 | } | |
486 | ||
73228c7e | 487 | static void kmalloc_oob_memset_16(struct kunit *test) |
f523e737 WL |
488 | { |
489 | char *ptr; | |
555999a0 | 490 | size_t size = 128 - KASAN_GRANULE_SIZE; |
f523e737 | 491 | |
f523e737 | 492 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 493 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
f33a0149 | 494 | |
d73dad4e | 495 | OPTIMIZER_HIDE_VAR(size); |
555999a0 | 496 | KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16)); |
f523e737 WL |
497 | kfree(ptr); |
498 | } | |
499 | ||
73228c7e | 500 | static void kmalloc_oob_in_memset(struct kunit *test) |
3f15801c AR |
501 | { |
502 | char *ptr; | |
555999a0 | 503 | size_t size = 128 - KASAN_GRANULE_SIZE; |
3f15801c | 504 | |
3f15801c | 505 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 506 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
f33a0149 | 507 | |
09c6304e | 508 | OPTIMIZER_HIDE_VAR(ptr); |
d73dad4e | 509 | OPTIMIZER_HIDE_VAR(size); |
555999a0 AK |
510 | KUNIT_EXPECT_KASAN_FAIL(test, |
511 | memset(ptr, 0, size + KASAN_GRANULE_SIZE)); | |
3f15801c AR |
512 | kfree(ptr); |
513 | } | |
514 | ||
758cabae | 515 | static void kmalloc_memmove_negative_size(struct kunit *test) |
98f3b56f WW |
516 | { |
517 | char *ptr; | |
518 | size_t size = 64; | |
d73dad4e | 519 | size_t invalid_size = -2; |
98f3b56f | 520 | |
1b0668be AK |
521 | /* |
522 | * Hardware tag-based mode doesn't check memmove for negative size. | |
523 | * As a result, this test introduces a side-effect memory corruption, | |
524 | * which can result in a crash. | |
525 | */ | |
526 | KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS); | |
527 | ||
98f3b56f | 528 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 529 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
98f3b56f WW |
530 | |
531 | memset((char *)ptr, 0, 64); | |
09c6304e | 532 | OPTIMIZER_HIDE_VAR(ptr); |
d73dad4e | 533 | OPTIMIZER_HIDE_VAR(invalid_size); |
73228c7e PA |
534 | KUNIT_EXPECT_KASAN_FAIL(test, |
535 | memmove((char *)ptr, (char *)ptr + 4, invalid_size)); | |
98f3b56f WW |
536 | kfree(ptr); |
537 | } | |
538 | ||
758cabae PC |
539 | static void kmalloc_memmove_invalid_size(struct kunit *test) |
540 | { | |
541 | char *ptr; | |
542 | size_t size = 64; | |
543 | volatile size_t invalid_size = size; | |
544 | ||
545 | ptr = kmalloc(size, GFP_KERNEL); | |
546 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
547 | ||
98f3b56f | 548 | memset((char *)ptr, 0, 64); |
09c6304e | 549 | OPTIMIZER_HIDE_VAR(ptr); |
73228c7e PA |
550 | KUNIT_EXPECT_KASAN_FAIL(test, |
551 | memmove((char *)ptr, (char *)ptr + 4, invalid_size)); | |
98f3b56f WW |
552 | kfree(ptr); |
553 | } | |
554 | ||
73228c7e | 555 | static void kmalloc_uaf(struct kunit *test) |
3f15801c AR |
556 | { |
557 | char *ptr; | |
558 | size_t size = 10; | |
559 | ||
3f15801c | 560 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 561 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
3f15801c AR |
562 | |
563 | kfree(ptr); | |
8fbad19b | 564 | KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]); |
3f15801c AR |
565 | } |
566 | ||
73228c7e | 567 | static void kmalloc_uaf_memset(struct kunit *test) |
3f15801c AR |
568 | { |
569 | char *ptr; | |
570 | size_t size = 33; | |
571 | ||
25b12a58 AK |
572 | /* |
573 | * Only generic KASAN uses quarantine, which is required to avoid a | |
574 | * kernel memory corruption this test causes. | |
575 | */ | |
576 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); | |
577 | ||
3f15801c | 578 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 579 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
3f15801c AR |
580 | |
581 | kfree(ptr); | |
73228c7e | 582 | KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size)); |
3f15801c AR |
583 | } |
584 | ||
73228c7e | 585 | static void kmalloc_uaf2(struct kunit *test) |
3f15801c AR |
586 | { |
587 | char *ptr1, *ptr2; | |
588 | size_t size = 43; | |
1b1df4c4 | 589 | int counter = 0; |
3f15801c | 590 | |
1b1df4c4 | 591 | again: |
3f15801c | 592 | ptr1 = kmalloc(size, GFP_KERNEL); |
73228c7e | 593 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); |
3f15801c AR |
594 | |
595 | kfree(ptr1); | |
73228c7e | 596 | |
3f15801c | 597 | ptr2 = kmalloc(size, GFP_KERNEL); |
73228c7e PA |
598 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); |
599 | ||
1b1df4c4 AK |
600 | /* |
601 | * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same. | |
602 | * Allow up to 16 attempts at generating different tags. | |
603 | */ | |
604 | if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) { | |
605 | kfree(ptr2); | |
606 | goto again; | |
607 | } | |
608 | ||
8fbad19b | 609 | KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]); |
73228c7e | 610 | KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2); |
3f15801c | 611 | |
3f15801c AR |
612 | kfree(ptr2); |
613 | } | |
614 | ||
73228c7e | 615 | static void kfree_via_page(struct kunit *test) |
b92a953c MR |
616 | { |
617 | char *ptr; | |
618 | size_t size = 8; | |
619 | struct page *page; | |
620 | unsigned long offset; | |
621 | ||
b92a953c | 622 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 623 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
b92a953c MR |
624 | |
625 | page = virt_to_page(ptr); | |
626 | offset = offset_in_page(ptr); | |
627 | kfree(page_address(page) + offset); | |
628 | } | |
629 | ||
73228c7e | 630 | static void kfree_via_phys(struct kunit *test) |
b92a953c MR |
631 | { |
632 | char *ptr; | |
633 | size_t size = 8; | |
634 | phys_addr_t phys; | |
635 | ||
b92a953c | 636 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 637 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
b92a953c MR |
638 | |
639 | phys = virt_to_phys(ptr); | |
640 | kfree(phys_to_virt(phys)); | |
641 | } | |
642 | ||
73228c7e | 643 | static void kmem_cache_oob(struct kunit *test) |
3f15801c AR |
644 | { |
645 | char *p; | |
646 | size_t size = 200; | |
11516135 AK |
647 | struct kmem_cache *cache; |
648 | ||
649 | cache = kmem_cache_create("test_cache", size, 0, 0, NULL); | |
73228c7e | 650 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); |
11516135 | 651 | |
3f15801c AR |
652 | p = kmem_cache_alloc(cache, GFP_KERNEL); |
653 | if (!p) { | |
73228c7e | 654 | kunit_err(test, "Allocation failed: %s\n", __func__); |
3f15801c AR |
655 | kmem_cache_destroy(cache); |
656 | return; | |
657 | } | |
658 | ||
73228c7e | 659 | KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]); |
11516135 | 660 | |
3f15801c AR |
661 | kmem_cache_free(cache, p); |
662 | kmem_cache_destroy(cache); | |
663 | } | |
664 | ||
11516135 | 665 | static void kmem_cache_accounted(struct kunit *test) |
0386bf38 GT |
666 | { |
667 | int i; | |
668 | char *p; | |
669 | size_t size = 200; | |
670 | struct kmem_cache *cache; | |
671 | ||
672 | cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL); | |
73228c7e | 673 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); |
0386bf38 | 674 | |
0386bf38 GT |
675 | /* |
676 | * Several allocations with a delay to allow for lazy per memcg kmem | |
677 | * cache creation. | |
678 | */ | |
679 | for (i = 0; i < 5; i++) { | |
680 | p = kmem_cache_alloc(cache, GFP_KERNEL); | |
dc2bf000 | 681 | if (!p) |
0386bf38 | 682 | goto free_cache; |
dc2bf000 | 683 | |
0386bf38 GT |
684 | kmem_cache_free(cache, p); |
685 | msleep(100); | |
686 | } | |
687 | ||
688 | free_cache: | |
689 | kmem_cache_destroy(cache); | |
690 | } | |
691 | ||
11516135 AK |
692 | static void kmem_cache_bulk(struct kunit *test) |
693 | { | |
694 | struct kmem_cache *cache; | |
695 | size_t size = 200; | |
696 | char *p[10]; | |
697 | bool ret; | |
698 | int i; | |
699 | ||
700 | cache = kmem_cache_create("test_cache", size, 0, 0, NULL); | |
701 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); | |
702 | ||
703 | ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p); | |
704 | if (!ret) { | |
705 | kunit_err(test, "Allocation failed: %s\n", __func__); | |
706 | kmem_cache_destroy(cache); | |
707 | return; | |
708 | } | |
709 | ||
710 | for (i = 0; i < ARRAY_SIZE(p); i++) | |
711 | p[i][0] = p[i][size - 1] = 42; | |
712 | ||
713 | kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p); | |
714 | kmem_cache_destroy(cache); | |
715 | } | |
716 | ||
3f15801c AR |
717 | static char global_array[10]; |
718 | ||
e5f47287 | 719 | static void kasan_global_oob_right(struct kunit *test) |
3f15801c | 720 | { |
f649dc0e PC |
721 | /* |
722 | * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS | |
53b0fe36 | 723 | * from failing here and panicking the kernel, access the array via a |
f649dc0e PC |
724 | * volatile pointer, which will prevent the compiler from being able to |
725 | * determine the array bounds. | |
726 | * | |
727 | * This access uses a volatile pointer to char (char *volatile) rather | |
728 | * than the more conventional pointer to volatile char (volatile char *) | |
729 | * because we want to prevent the compiler from making inferences about | |
730 | * the pointer itself (i.e. its array bounds), not the data that it | |
731 | * refers to. | |
732 | */ | |
733 | char *volatile array = global_array; | |
734 | char *p = &array[ARRAY_SIZE(global_array) + 3]; | |
3f15801c | 735 | |
58b999d7 | 736 | /* Only generic mode instruments globals. */ |
da17e377 | 737 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); |
58b999d7 | 738 | |
73228c7e | 739 | KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); |
3f15801c AR |
740 | } |
741 | ||
e5f47287 ME |
742 | static void kasan_global_oob_left(struct kunit *test) |
743 | { | |
744 | char *volatile array = global_array; | |
745 | char *p = array - 3; | |
746 | ||
747 | /* | |
748 | * GCC is known to fail this test, skip it. | |
749 | * See https://bugzilla.kernel.org/show_bug.cgi?id=215051. | |
750 | */ | |
751 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG); | |
752 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); | |
753 | KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); | |
754 | } | |
755 | ||
611806b4 | 756 | /* Check that ksize() makes the whole object accessible. */ |
73228c7e | 757 | static void ksize_unpoisons_memory(struct kunit *test) |
96fe805f AP |
758 | { |
759 | char *ptr; | |
48c23239 | 760 | size_t size = 123, real_size; |
96fe805f | 761 | |
96fe805f | 762 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 763 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
96fe805f | 764 | real_size = ksize(ptr); |
0fd37925 | 765 | |
aaf50b19 KC |
766 | OPTIMIZER_HIDE_VAR(ptr); |
767 | ||
0fd37925 | 768 | /* This access shouldn't trigger a KASAN report. */ |
96fe805f | 769 | ptr[size] = 'x'; |
0fd37925 AK |
770 | |
771 | /* This one must. */ | |
8fbad19b | 772 | KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]); |
0fd37925 | 773 | |
96fe805f AP |
774 | kfree(ptr); |
775 | } | |
776 | ||
611806b4 AK |
777 | /* |
778 | * Check that a use-after-free is detected by ksize() and via normal accesses | |
779 | * after it. | |
780 | */ | |
781 | static void ksize_uaf(struct kunit *test) | |
782 | { | |
783 | char *ptr; | |
784 | int size = 128 - KASAN_GRANULE_SIZE; | |
785 | ||
786 | ptr = kmalloc(size, GFP_KERNEL); | |
787 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
788 | kfree(ptr); | |
789 | ||
aaf50b19 | 790 | OPTIMIZER_HIDE_VAR(ptr); |
611806b4 | 791 | KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr)); |
b38fcca3 AK |
792 | KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); |
793 | KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); | |
611806b4 AK |
794 | } |
795 | ||
73228c7e | 796 | static void kasan_stack_oob(struct kunit *test) |
eae08dca | 797 | { |
73228c7e | 798 | char stack_array[10]; |
2dfd1bd9 | 799 | /* See comment in kasan_global_oob_right. */ |
f649dc0e PC |
800 | char *volatile array = stack_array; |
801 | char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF]; | |
eae08dca | 802 | |
da17e377 | 803 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK); |
eae08dca | 804 | |
73228c7e | 805 | KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); |
eae08dca AR |
806 | } |
807 | ||
/* Check that an access one byte before a dynamic alloca is detected. */
static void kasan_alloca_oob_left(struct kunit *test)
{
	/* volatile i forces a true variable-length (alloca-style) allocation. */
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
822 | ||
/* Check that an access one byte past a dynamic alloca is detected. */
static void kasan_alloca_oob_right(struct kunit *test)
{
	/* volatile i forces a true variable-length (alloca-style) allocation. */
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
837 | ||
/* Check that freeing a kmem_cache object twice triggers a KASAN report. */
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		/* Can't assert here: the cache must be destroyed before returning. */
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	/* The second free of the same object must be reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}
858 | ||
/* Check that freeing a pointer into the middle of an object is reported. */
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	/* SLAB_TYPESAFE_BY_RCU exercises the quarantine-bypassing free path. */
	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		/* Can't assert here: the cache must be destroyed before returning. */
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}
887 | ||
/* No-op constructor; its mere presence prevents slab cache merging. */
static void empty_cache_ctor(void *object) { }
889 | ||
f98f966c ME |
890 | static void kmem_cache_double_destroy(struct kunit *test) |
891 | { | |
892 | struct kmem_cache *cache; | |
893 | ||
70effdc3 AK |
894 | /* Provide a constructor to prevent cache merging. */ |
895 | cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor); | |
f98f966c ME |
896 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); |
897 | kmem_cache_destroy(cache); | |
898 | KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache)); | |
899 | } | |
900 | ||
/* Check that memchr() reading one byte past the allocation is detected. */
static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	/* For tag-based modes, round up so size + 1 lands in the redzone. */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Keep the compiler from constant-folding the out-of-bounds length. */
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}
925 | ||
/* Check that memcmp() reading one byte past the allocation is detected. */
static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	/* For tag-based modes, round up so size + 1 lands in the redzone. */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	/* Keep the compiler from constant-folding the out-of-bounds length. */
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}
951 | ||
/* Check that str* functions detect use-after-free accesses. */
static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
987 | ||
/*
 * Helper: each bit-modifying bitop on an out-of-bounds bit (nr relative to
 * addr) must trigger a KASAN report.
 */
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
999 | ||
/*
 * Helper: each test-and-modify (and plain test) bitop on an out-of-bounds
 * bit must trigger a KASAN report.
 */
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

	/* This helper is only defined on some architectures. */
#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}
1016 | ||
/* Check that bitops on out-of-bounds bits are detected in generic mode. */
static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}
19a33ca6 | 1045 | |
/* Check that bitops on redzone bits are detected in tag-based modes. */
static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}
1063 | ||
73228c7e | 1064 | static void kmalloc_double_kzfree(struct kunit *test) |
bb104ed7 ME |
1065 | { |
1066 | char *ptr; | |
1067 | size_t size = 16; | |
1068 | ||
bb104ed7 | 1069 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 1070 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
bb104ed7 | 1071 | |
453431a5 | 1072 | kfree_sensitive(ptr); |
73228c7e | 1073 | KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr)); |
bb104ed7 ME |
1074 | } |
1075 | ||
/* Check that vmalloc pointers are tagged and helpers accept tagged pointers. */
static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

	/* set_memory_*() are not exported to modules, so only test built-in. */
#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}
1110 | ||
/* Check that out-of-bounds accesses past a vmalloc allocation are detected. */
static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}
1158 | ||
/* Check that vmap() mappings get tagged and stay usable in SW_TAGS mode. */
static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	/* The vmap pointer must carry a real (non-match-all) tag. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}
1201 | ||
/* Check that vm_map_ram() mappings get tagged and stay usable in SW_TAGS mode. */
static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/* The mapped pointer must carry a real (non-match-all) tag. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}
1231 | ||
1232 | static void vmalloc_percpu(struct kunit *test) | |
1233 | { | |
1234 | char __percpu *ptr; | |
1235 | int cpu; | |
1236 | ||
1237 | /* | |
1238 | * This test is specifically crafted for the software tag-based mode, | |
1239 | * the only tag-based mode that poisons percpu mappings. | |
1240 | */ | |
1241 | KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS); | |
1242 | ||
1243 | ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); | |
1244 | ||
1245 | for_each_possible_cpu(cpu) { | |
1246 | char *c_ptr = per_cpu_ptr(ptr, cpu); | |
1247 | ||
1248 | KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN); | |
1249 | KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL); | |
1250 | ||
1251 | /* Make sure that in-bounds accesses don't crash the kernel. */ | |
1252 | *c_ptr = 0; | |
1253 | } | |
1254 | ||
1255 | free_percpu(ptr); | |
06513916 | 1256 | } |
387d6e46 | 1257 | |
573a4809 AK |
1258 | /* |
1259 | * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN, | |
1260 | * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based | |
1261 | * modes. | |
1262 | */ | |
1263 | static void match_all_not_assigned(struct kunit *test) | |
1264 | { | |
1265 | char *ptr; | |
1266 | struct page *pages; | |
1267 | int i, size, order; | |
1268 | ||
1269 | KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); | |
1270 | ||
1271 | for (i = 0; i < 256; i++) { | |
1272 | size = (get_random_int() % 1024) + 1; | |
1273 | ptr = kmalloc(size, GFP_KERNEL); | |
1274 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
1275 | KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN); | |
1276 | KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL); | |
1277 | kfree(ptr); | |
1278 | } | |
1279 | ||
1280 | for (i = 0; i < 256; i++) { | |
1281 | order = (get_random_int() % 4) + 1; | |
1282 | pages = alloc_pages(GFP_KERNEL, order); | |
1283 | ptr = page_address(pages); | |
1284 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
1285 | KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN); | |
1286 | KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL); | |
1287 | free_pages((unsigned long)ptr, order); | |
1288 | } | |
1a2473f0 AK |
1289 | |
1290 | if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) | |
1291 | return; | |
1292 | ||
1293 | for (i = 0; i < 256; i++) { | |
1294 | size = (get_random_int() % 1024) + 1; | |
1295 | ptr = vmalloc(size); | |
1296 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
1297 | KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN); | |
1298 | KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL); | |
1299 | vfree(ptr); | |
1300 | } | |
573a4809 AK |
1301 | } |
1302 | ||
/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff.*/
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}
1328 | ||
/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}
1357 | ||
/* Registry of all KASAN KUnit test cases; mode-specific ones skip themselves. */
static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(vmalloc_percpu),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};
1416 | ||
/*
 * The KASAN test suite. kasan_test_init/kasan_test_exit are defined earlier
 * in this file (not shown in this chunk).
 */
static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");