Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
3f15801c AR |
2 | /* |
3 | * | |
4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | |
5 | * Author: Andrey Ryabinin <a.ryabinin@samsung.com> | |
3f15801c AR |
6 | */ |
7 | ||
19a33ca6 | 8 | #include <linux/bitops.h> |
0386bf38 | 9 | #include <linux/delay.h> |
19a33ca6 | 10 | #include <linux/kasan.h> |
3f15801c | 11 | #include <linux/kernel.h> |
eae08dca | 12 | #include <linux/mm.h> |
19a33ca6 ME |
13 | #include <linux/mman.h> |
14 | #include <linux/module.h> | |
3f15801c | 15 | #include <linux/printk.h> |
573a4809 | 16 | #include <linux/random.h> |
3f15801c AR |
17 | #include <linux/slab.h> |
18 | #include <linux/string.h> | |
eae08dca | 19 | #include <linux/uaccess.h> |
b92a953c | 20 | #include <linux/io.h> |
06513916 | 21 | #include <linux/vmalloc.h> |
b92a953c MR |
22 | |
23 | #include <asm/page.h> | |
3f15801c | 24 | |
83c4e7a0 PA |
25 | #include <kunit/test.h> |
26 | ||
f33a0149 WW |
27 | #include "../mm/kasan/kasan.h" |
28 | ||
1f600626 | 29 | #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE) |
f33a0149 | 30 | |
adb72ae1 | 31 | /* |
0fd37925 AK |
32 | * Some tests use these global variables to store return values from function |
33 | * calls that could otherwise be eliminated by the compiler as dead code. | |
adb72ae1 | 34 | */ |
/* Sink for pointer results (memchr/strchr/etc.) so calls aren't elided. */
void *kasan_ptr_result;
/* Sink for integer results (memcmp/strcmp/test_bit/etc.). */
int kasan_int_result;

/* KUnit resource slot used by KUNIT_EXPECT_KASAN_FAIL ("kasan_data"). */
static struct kunit_resource resource;
/* Shared expected/found flags that the KASAN report path updates. */
static struct kunit_kasan_expectation fail_data;
/* Saved multi-shot state, restored in kasan_test_exit(). */
static bool multishot;
41 | ||
0fd37925 AK |
42 | /* |
43 | * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the | |
f05842cf AK |
44 | * first detected bug and panic the kernel if panic_on_warn is enabled. For |
45 | * hardware tag-based KASAN also allow tag checking to be reenabled for each | |
46 | * test, see the comment for KUNIT_EXPECT_KASAN_FAIL(). | |
0fd37925 | 47 | */ |
83c4e7a0 PA |
static int kasan_test_init(struct kunit *test)
{
	/* The whole suite is meaningless if KASAN is compiled in but disabled. */
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	/* Save current multi-shot state so kasan_test_exit() can restore it. */
	multishot = kasan_save_enable_multi_shot();
	kasan_set_tagging_report_once(false);
	return 0;
}
59 | ||
/* Undo the per-test setup done in kasan_test_init(). */
static void kasan_test_exit(struct kunit *test)
{
	kasan_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
}
65 | ||
66 | /** | |
0fd37925 AK |
67 | * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a |
68 | * KASAN report; causes a test failure otherwise. This relies on a KUnit | |
69 | * resource named "kasan_data". Do not use this name for KUnit resources | |
70 | * outside of KASAN tests. | |
f05842cf AK |
71 | * |
72 | * For hardware tag-based KASAN, when a tag fault happens, tag checking is | |
73 | * normally auto-disabled. When this happens, this test handler reenables | |
74 | * tag checking. As tag checking can be only disabled or enabled per CPU, this | |
75 | * handler disables migration (preemption). | |
2e4bde6a AK |
76 | * |
77 | * Since the compiler doesn't see that the expression can change the fail_data | |
78 | * fields, it can reorder or optimize away the accesses to those fields. | |
79 | * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the | |
80 | * expression to prevent that. | |
83c4e7a0 | 81 | */ |
f05842cf AK |
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))				\
		migrate_disable(); /* tag checking is per-CPU */	\
	WRITE_ONCE(fail_data.report_expected, true);			\
	WRITE_ONCE(fail_data.report_found, false);			\
	kunit_add_named_resource(test,					\
				NULL,					\
				NULL,					\
				&resource,				\
				"kasan_data", &fail_data);		\
	barrier();							\
	expression;							\
	barrier();							\
	KUNIT_EXPECT_EQ(test,						\
			READ_ONCE(fail_data.report_expected),		\
			READ_ONCE(fail_data.report_found));		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {				\
		if (READ_ONCE(fail_data.report_found))			\
			kasan_enable_tagging(); /* fault disabled it */	\
		migrate_enable();					\
	}								\
} while (0)
104 | ||
da17e377 AK |
/* Skip the current test unless the given Kconfig option is enabled. */
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " required");	\
		return;							\
	}								\
} while (0)
111 | ||
/* Skip the current test if the given Kconfig option is enabled. */
#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " enabled");	\
		return;							\
	}								\
} while (0)
118 | ||
/* Check that an out-of-bounds write just past a kmalloc object is detected. */
static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* OOB_TAG_OFF pushes the access into the next granule for tag-based modes. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}
130 | ||
/* Check that an out-of-bounds read one byte before a kmalloc object is detected. */
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}
142 | ||
/* Same as kmalloc_oob_right but for a node-bound, page-sized allocation. */
static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}
154 | ||
858bdeb0 AK |
/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}
47adccce | 175 | |
/* Check that a use-after-free on a page-allocator-backed kmalloc is detected. */
static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}
189 | ||
/* Check that freeing an unaligned pointer into a page-allocator chunk is caught. */
static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
e6e8379c | 202 | |
858bdeb0 AK |
203 | static void pagealloc_oob_right(struct kunit *test) |
204 | { | |
205 | char *ptr; | |
206 | struct page *pages; | |
207 | size_t order = 4; | |
208 | size_t size = (1UL << (PAGE_SHIFT + order)); | |
209 | ||
210 | /* | |
211 | * With generic KASAN page allocations have no redzones, thus | |
212 | * out-of-bounds detection is not guaranteed. | |
213 | * See https://bugzilla.kernel.org/show_bug.cgi?id=210503. | |
214 | */ | |
215 | KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); | |
216 | ||
217 | pages = alloc_pages(GFP_KERNEL, order); | |
218 | ptr = page_address(pages); | |
219 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
220 | ||
221 | KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0); | |
222 | free_pages((unsigned long)ptr, order); | |
223 | } | |
224 | ||
225 | static void pagealloc_uaf(struct kunit *test) | |
226 | { | |
227 | char *ptr; | |
228 | struct page *pages; | |
229 | size_t order = 4; | |
230 | ||
231 | pages = alloc_pages(GFP_KERNEL, order); | |
232 | ptr = page_address(pages); | |
233 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); | |
234 | free_pages((unsigned long)ptr, order); | |
235 | ||
236 | KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0); | |
237 | } | |
238 | ||
/* OOB write on the largest allocation that still comes from a slab cache. */
static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}
254 | ||
b87c28b9 AK |
/*
 * Grow an allocation with krealloc() (size1 -> size2, size1 < size2) and
 * check that the in-bounds bytes became accessible while bytes past size2
 * are still poisoned.
 */
static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}
286 | ||
b87c28b9 AK |
/*
 * Shrink an allocation with krealloc() (size1 -> size2, size2 < size1) and
 * check that bytes past the new size were repoisoned.
 */
static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes all size2, middle, and size1 should land in separate
	 * granules and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}
327 | ||
b87c28b9 AK |
/* Growing krealloc() within slab-cache sizes. */
static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}
332 | ||
/* Shrinking krealloc() within slab-cache sizes. */
static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}
337 | ||
/* Growing krealloc() on page-allocator-backed (larger than any slab) sizes. */
static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}
346 | ||
/* Shrinking krealloc() on page-allocator-backed (larger than any slab) sizes. */
static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}
355 | ||
26a5ca7a AK |
/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	/* krealloc() on a freed pointer must itself trigger a report ... */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	/* ... and fail, leaving the object poisoned. */
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}
374 | ||
/* 16-byte OOB copy out of a short (sizeof - 3) allocation. */
static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* The 16-byte struct assignment reads/writes past ptr1's 13 bytes. */
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}
394 | ||
58b999d7 AK |
/* 16-byte use-after-free read via struct assignment. */
static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	/* Reading *ptr2 after kfree() must be reported. */
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}
411 | ||
/* 2-byte memset() straddling the end of an 8-byte allocation. */
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}
423 | ||
/* 4-byte memset() straddling the end of an 8-byte allocation. */
static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}
435 | ||
436 | ||
/* 8-byte memset() straddling the end of an 8-byte allocation. */
static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}
448 | ||
/* 16-byte memset() straddling the end of a 16-byte allocation. */
static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}
460 | ||
/* memset() whose length overruns the allocation (size + 5 bytes). */
static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}
472 | ||
73228c7e | 473 | static void kmalloc_memmove_invalid_size(struct kunit *test) |
98f3b56f WW |
474 | { |
475 | char *ptr; | |
476 | size_t size = 64; | |
477 | volatile size_t invalid_size = -2; | |
478 | ||
98f3b56f | 479 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 480 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
98f3b56f WW |
481 | |
482 | memset((char *)ptr, 0, 64); | |
73228c7e PA |
483 | |
484 | KUNIT_EXPECT_KASAN_FAIL(test, | |
485 | memmove((char *)ptr, (char *)ptr + 4, invalid_size)); | |
98f3b56f WW |
486 | kfree(ptr); |
487 | } | |
488 | ||
/* Basic use-after-free write into a kmalloc object. */
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}
500 | ||
/* Use-after-free through memset() over the whole freed object. */
static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}
512 | ||
/*
 * Use-after-free on an object whose memory was reallocated: write through
 * the stale ptr1 after ptr2 took over the slot.
 */
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}
542 | ||
/*
 * kfree() through a virt -> page -> virt round-trip; must not be reported
 * as an invalid free.
 */
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}
557 | ||
/*
 * kfree() through a virt -> phys -> virt round-trip; must not be reported
 * as an invalid free.
 */
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}
570 | ||
/* Out-of-bounds read on an object from a dedicated kmem_cache. */
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}
592 | ||
/* Exercise a SLAB_ACCOUNT cache; no KASAN report expected, just no crash. */
static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}
619 | ||
11516135 AK |
/* Bulk alloc/free API: in-bounds accesses must not produce a report. */
static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Touch first and last byte of each object: all in bounds. */
	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}
644 | ||
3f15801c AR |
static char global_array[10];

/* Out-of-bounds read past a global array (globals redzoning). */
static void kasan_global_oob(struct kunit *test)
{
	/* volatile prevents the compiler from folding the OOB index. */
	volatile int i = 3;
	char *p = &global_array[ARRAY_SIZE(global_array) + i];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
657 | ||
/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}
676 | ||
611806b4 AK |
/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}
694 | ||
/* Out-of-bounds read past a stack array (stack instrumentation). */
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	volatile int i = OOB_TAG_OFF;
	char *p = &stack_array[ARRAY_SIZE(stack_array) + i];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
705 | ||
/* Out-of-bounds read one byte before a variable-length stack array. */
static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
718 | ||
/* Out-of-bounds read one byte past a variable-length stack array. */
static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
731 | ||
/* Double free of a kmem_cache object must be reported. */
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}
752 | ||
/* Freeing an unaligned pointer into a kmem_cache object must be reported. */
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}
781 | ||
/* memchr() scanning one byte past the allocation must be reported. */
static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	/* Align size to a granule so size + 1 is the first bad byte in tag modes. */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}
804 | ||
/* memcmp() reading one byte past the allocation must be reported. */
static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	/* Align size to a granule so size + 1 is the first bad byte in tag modes. */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}
828 | ||
/* str* helpers on a freed object: each call must be reported. */
static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
864 | ||
58b999d7 AK |
/*
 * Helper: exercise every bit-modifying bitop (atomic and non-atomic
 * variants) on bit @nr of @addr and expect each one to be reported by
 * KASAN as an invalid access. Callers pass an @nr/@addr combination that
 * lies outside the allocated object.
 */
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
876 | ||
/*
 * Helper: exercise the test-and-modify and pure test bitops on bit @nr of
 * @addr and expect each one to be reported by KASAN as an invalid access.
 * Results of the value-returning ops are stored in kasan_int_result so the
 * compiler cannot eliminate the calls as dead code.
 */
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

	/* Not all architectures provide this op; only test it where defined. */
#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}
893 | ||
/*
 * Check that generic KASAN detects out-of-bounds bitops, both within the
 * kzalloc rounding slack and fully past the allocation.
 */
static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * Below calls try to access bit within allocated memory; however, the
	 * below accesses are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/*
	 * Below calls try to access bit beyond allocated memory.
	 */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}
19a33ca6 | 922 | |
58b999d7 AK |
/*
 * Check that the tag-based modes detect out-of-bounds bitops that land in
 * the redzone that follows the allocated object.
 */
static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}
940 | ||
73228c7e | 941 | static void kmalloc_double_kzfree(struct kunit *test) |
bb104ed7 ME |
942 | { |
943 | char *ptr; | |
944 | size_t size = 16; | |
945 | ||
bb104ed7 | 946 | ptr = kmalloc(size, GFP_KERNEL); |
73228c7e | 947 | KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); |
bb104ed7 | 948 | |
453431a5 | 949 | kfree_sensitive(ptr); |
73228c7e | 950 | KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr)); |
bb104ed7 ME |
951 | } |
952 | ||
/*
 * Check that KASAN detects an out-of-bounds read past a vmalloc allocation.
 * Only runs with CONFIG_KASAN_VMALLOC, which instruments vmalloc mappings.
 */
static void vmalloc_oob(struct kunit *test)
{
	void *area;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	/*
	 * We have to be careful not to hit the guard page.
	 * The MMU will catch that and crash us.
	 */
	area = vmalloc(3000);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);

	/* Offset 3100 is past the 3000-byte allocation but before the guard page. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
	vfree(area);
}
387d6e46 | 969 | |
573a4809 AK |
/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* Sample 256 random-sized slab allocations. */
	for (i = 0; i < 256; i++) {
		size = (get_random_int() % 1024) + 1;
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	/* Sample 256 random-order page allocations. */
	for (i = 0; i < 256; i++) {
		order = (get_random_int() % 4) + 1;
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}
}
1002 | ||
/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff (KASAN_TAG_KERNEL, the match-all tag). */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}
1028 | ||
/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr));
	kfree(ptr);
}
1057 | ||
73228c7e PA |
/*
 * The full list of KASAN KUnit test cases. Individual tests gate themselves
 * on the required KASAN mode/configs via the KASAN_TEST_NEEDS_CONFIG_ON/OFF
 * helpers, so the list can be registered unconditionally.
 */
static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};
1109 | ||
1110 | static struct kunit_suite kasan_kunit_test_suite = { | |
1111 | .name = "kasan", | |
1112 | .init = kasan_test_init, | |
1113 | .test_cases = kasan_kunit_test_cases, | |
1114 | .exit = kasan_test_exit, | |
1115 | }; | |
1116 | ||
/* Register the suite with the KUnit framework. */
kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");