// SPDX-License-Identifier: GPL-2.0
/*
 * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
 * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
 *
 * For corner cases with UBSAN, try testing with:
 *
 * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *	--kconfig_add CONFIG_FORTIFY_SOURCE=y \
 *	--kconfig_add CONFIG_UBSAN=y \
 *	--kconfig_add CONFIG_UBSAN_TRAP=y \
 *	--kconfig_add CONFIG_UBSAN_BOUNDS=y \
 *	--kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
 *	--make_options LLVM=1 fortify
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* We don't need to fill dmesg with the fortify WARNs during testing. */
#ifdef DEBUG
# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
# define FORTIFY_WARN_KUNIT(x...)  WARN_ONCE(x)
#else
# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
# define FORTIFY_WARN_KUNIT(x...)  do { } while (0)
#endif
/* Redefine fortify_panic() to track failures. */
void fortify_add_kunit_error(int write);
#define fortify_panic(func, write, avail, size, retfail) do {		\
	FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size);	\
	fortify_add_kunit_error(write);					\
	return (retfail);						\
} while (0)

/* Redefine fortify_warn_once() to track memcpy() failures. */
#define fortify_warn_once(chk_func, x...) do {			\
	bool __result = chk_func;				\
	FORTIFY_WARN_KUNIT(__result, x);			\
	if (__result)						\
		fortify_add_kunit_error(1);			\
} while (0)
43 #include <kunit/device.h>
44 #include <kunit/test.h>
45 #include <kunit/test-bug.h>
46 #include <linux/device.h>
47 #include <linux/slab.h>
48 #include <linux/string.h>
49 #include <linux/vmalloc.h>
/* Handle being built without CONFIG_FORTIFY_SOURCE */
#ifndef __compiletime_strlen
# define __compiletime_strlen __builtin_strlen
#endif
/* Named KUnit resources so fortify_add_kunit_error() can find the counters. */
static struct kunit_resource read_resource;
static struct kunit_resource write_resource;
/* Per-test tallies of FORTIFY read/write overflow detections. */
static int fortify_read_overflows;
static int fortify_write_overflows;

/* Compile-time-visible const strings: lengths are known to the compiler. */
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
/* Mutable array: the compiler cannot assume its strlen() stays fixed. */
static char array_unknown[] = "compiler thinks I might change";
65 void fortify_add_kunit_error(int write)
67 struct kunit_resource *resource;
68 struct kunit *current_test;
70 current_test = kunit_get_current_test();
74 resource = kunit_find_named_resource(current_test,
75 write ? "fortify_write_overflows"
76 : "fortify_read_overflows");
80 (*(int *)resource->data)++;
81 kunit_put_resource(resource);
84 static void fortify_test_known_sizes(struct kunit *test)
86 KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
87 KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
88 KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
90 KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
91 /* Externally defined and dynamically sized string pointer: */
92 KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
95 /* This is volatile so the optimizer can't perform DCE below. */
96 static volatile int pick;
98 /* Not inline to keep optimizer from figuring out which string we want. */
99 static noinline size_t want_minus_one(int pick)
114 return __compiletime_strlen(str);
117 static void fortify_test_control_flow_split(struct kunit *test)
119 KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
/* Check __builtin_object_size() agreement with __alloc_size() hints. */
#define KUNIT_EXPECT_BOS(test, p, expected, name)			\
	KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1),		\
		expected,						\
		"__alloc_size() not working with __bos on " name "\n")

#if !__has_builtin(__builtin_dynamic_object_size)
#define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
	/* Silence "unused variable 'expected'" warning. */		\
	KUNIT_EXPECT_EQ(test, expected, expected)
#else
#define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
	KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1),	\
		expected,						\
		"__alloc_size() not working with __bdos on " name "\n")
#endif
/* If the expected size is a constant value, __bos can see it. */
#define check_const(_expected, alloc, free)		do {		\
	size_t expected = (_expected);					\
	void *p = alloc;						\
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
	KUNIT_EXPECT_BOS(test, p, expected, #alloc);			\
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
	free;								\
} while (0)

/* If the expected size is NOT a constant value, __bos CANNOT see it. */
#define check_dynamic(_expected, alloc, free)		do {		\
	size_t expected = (_expected);					\
	void *p = alloc;						\
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
	KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc);			\
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
	free;								\
} while (0)
/* Assortment of constant-value kinda-edge cases. */
#define CONST_TEST_BODY(TEST_alloc)	do {				\
	/* Special-case vmalloc()-family to skip 0-sized allocs. */	\
	if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0)			\
		TEST_alloc(check_const, 0, 0);				\
	TEST_alloc(check_const, 1, 1);					\
	TEST_alloc(check_const, 128, 128);				\
	TEST_alloc(check_const, 1023, 1023);				\
	TEST_alloc(check_const, 1025, 1025);				\
	TEST_alloc(check_const, 4096, 4096);				\
	TEST_alloc(check_const, 4097, 4097);				\
} while (0)
171 static volatile size_t zero_size;
172 static volatile size_t unknown_size = 50;
174 #if !__has_builtin(__builtin_dynamic_object_size)
175 #define DYNAMIC_TEST_BODY(TEST_alloc) \
176 kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
178 #define DYNAMIC_TEST_BODY(TEST_alloc) do { \
179 size_t size = unknown_size; \
182 * Expected size is "size" in each test, before it is then \
183 * internally incremented in each test. Requires we disable \
186 TEST_alloc(check_dynamic, size, size++); \
187 /* Make sure incrementing actually happened. */ \
188 KUNIT_EXPECT_NE(test, size, unknown_size); \
/* Emit a const-size and a dynamic-size test function for one allocator. */
#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator)				\
static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
{									\
	CONST_TEST_BODY(TEST_##allocator);				\
}									\
static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
{									\
	DYNAMIC_TEST_BODY(TEST_##allocator);				\
}
202 #define TEST_kmalloc(checker, expected_size, alloc_size) do { \
203 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
207 checker(expected_size, kmalloc(alloc_size, gfp), \
209 checker(expected_size, \
210 kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
212 checker(expected_size, kzalloc(alloc_size, gfp), \
214 checker(expected_size, \
215 kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
217 checker(expected_size, kcalloc(1, alloc_size, gfp), \
219 checker(expected_size, kcalloc(alloc_size, 1, gfp), \
221 checker(expected_size, \
222 kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
224 checker(expected_size, \
225 kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
227 checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
229 checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
231 checker(expected_size, \
232 kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
234 checker(expected_size, \
235 kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
237 checker(expected_size, __kmalloc(alloc_size, gfp), \
239 checker(expected_size, \
240 __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
243 orig = kmalloc(alloc_size, gfp); \
244 KUNIT_EXPECT_TRUE(test, orig != NULL); \
245 checker((expected_size) * 2, \
246 krealloc(orig, (alloc_size) * 2, gfp), \
248 orig = kmalloc(alloc_size, gfp); \
249 KUNIT_EXPECT_TRUE(test, orig != NULL); \
250 checker((expected_size) * 2, \
251 krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
253 orig = kmalloc(alloc_size, gfp); \
254 KUNIT_EXPECT_TRUE(test, orig != NULL); \
255 checker((expected_size) * 2, \
256 krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
260 /* Using memdup() with fixed size, so force unknown length. */ \
261 if (!__builtin_constant_p(expected_size)) \
263 checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
265 DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
267 /* Sizes are in pages, not bytes. */
268 #define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
269 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
270 checker((expected_pages) * PAGE_SIZE, \
271 vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
272 checker((expected_pages) * PAGE_SIZE, \
273 vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
274 checker((expected_pages) * PAGE_SIZE, \
275 __vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
277 DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
279 /* Sizes are in pages (and open-coded for side-effects), not bytes. */
280 #define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
281 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
285 checker((expected_pages) * PAGE_SIZE, \
286 kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
288 checker((expected_pages) * PAGE_SIZE, \
289 kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
291 checker((expected_pages) * PAGE_SIZE, \
292 kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
294 checker((expected_pages) * PAGE_SIZE, \
295 kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
297 checker((expected_pages) * PAGE_SIZE, \
298 kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
300 checker((expected_pages) * PAGE_SIZE, \
301 kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
303 checker((expected_pages) * PAGE_SIZE, \
304 kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
306 checker((expected_pages) * PAGE_SIZE, \
307 kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
310 prev_size = (expected_pages) * PAGE_SIZE; \
311 orig = kvmalloc(prev_size, gfp); \
312 KUNIT_EXPECT_TRUE(test, orig != NULL); \
313 checker(((expected_pages) * PAGE_SIZE) * 2, \
314 kvrealloc(orig, prev_size, \
315 ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
318 DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
320 #define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
321 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
322 const char dev_name[] = "fortify-test"; \
323 struct device *dev; \
327 /* Create dummy device for devm_kmalloc()-family tests. */ \
328 dev = kunit_device_register(test, dev_name); \
329 KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
330 "Cannot register test device\n"); \
332 checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
333 devm_kfree(dev, p)); \
334 checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
335 devm_kfree(dev, p)); \
336 checker(expected_size, \
337 devm_kmalloc_array(dev, 1, alloc_size, gfp), \
338 devm_kfree(dev, p)); \
339 checker(expected_size, \
340 devm_kmalloc_array(dev, alloc_size, 1, gfp), \
341 devm_kfree(dev, p)); \
342 checker(expected_size, \
343 devm_kcalloc(dev, 1, alloc_size, gfp), \
344 devm_kfree(dev, p)); \
345 checker(expected_size, \
346 devm_kcalloc(dev, alloc_size, 1, gfp), \
347 devm_kfree(dev, p)); \
349 orig = devm_kmalloc(dev, alloc_size, gfp); \
350 KUNIT_EXPECT_TRUE(test, orig != NULL); \
351 checker((expected_size) * 2, \
352 devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
353 devm_kfree(dev, p)); \
356 /* Using memdup() with fixed size, so force unknown length. */ \
357 if (!__builtin_constant_p(expected_size)) \
359 checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
360 devm_kfree(dev, p)); \
362 kunit_device_unregister(test, dev); \
364 DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
/*
 * Strings of varying runtime-discovered lengths for the realloc tests.
 * NOTE(review): the leading entries were lost in extraction and are
 * reconstructed — confirm against upstream lib/fortify_kunit.c.
 */
static const char * const test_strs[] = {
	"",
	"Hello there",
	"A longer string, just for variety",
};
/* kmemdup()/kmemdup_array() with runtime-computed (non-constant) lengths. */
#define TEST_realloc(checker)	do {					\
	gfp_t gfp = GFP_KERNEL;						\
	size_t len;							\
	int i;								\
									\
	for (i = 0; i < ARRAY_SIZE(test_strs); i++) {			\
		len = strlen(test_strs[i]);				\
		KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0);	\
		checker(len, kmemdup_array(test_strs[i], len, 1, gfp),	\
			kfree(p));					\
		checker(len, kmemdup(test_strs[i], len, gfp),		\
			kfree(p));					\
	}								\
} while (0)
386 static void fortify_test_realloc_size(struct kunit *test)
388 TEST_realloc(check_dynamic);
/*
 * We can't have an array at the end of a structure or else
 * builds without -fstrict-flex-arrays=3 will report them as
 * being an unknown length. Additionally, add bytes before
 * and after the string to catch over/underflows if tests
 * fail.
 */
struct fortify_padding {
	unsigned long bytes_before;
	char buf[32];
	unsigned long bytes_after;
};
/* Force compiler into not being able to resolve size at compile-time. */
/* Always 0 at runtime; adding it to a size makes the size non-constant. */
static volatile int unconst;
406 static void fortify_test_strlen(struct kunit *test)
408 struct fortify_padding pad = { };
409 int i, end = sizeof(pad.buf) - 1;
411 /* Fill 31 bytes with valid characters. */
412 for (i = 0; i < sizeof(pad.buf) - 1; i++)
413 pad.buf[i] = i + '0';
414 /* Trailing bytes are still %NUL. */
415 KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
416 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
418 /* String is terminated, so strlen() is valid. */
419 KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
420 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
422 /* Make string unterminated, and recount. */
424 end = sizeof(pad.buf);
425 KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
426 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
429 static void fortify_test_strnlen(struct kunit *test)
431 struct fortify_padding pad = { };
432 int i, end = sizeof(pad.buf) - 1;
434 /* Fill 31 bytes with valid characters. */
435 for (i = 0; i < sizeof(pad.buf) - 1; i++)
436 pad.buf[i] = i + '0';
437 /* Trailing bytes are still %NUL. */
438 KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
439 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
441 /* String is terminated, so strnlen() is valid. */
442 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
443 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
444 /* A truncated strnlen() will be safe, too. */
445 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
446 sizeof(pad.buf) / 2);
447 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
449 /* Make string unterminated, and recount. */
451 end = sizeof(pad.buf);
452 /* Reading beyond with strncpy() will fail. */
453 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
454 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
455 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
456 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
458 /* Early-truncated is safe still, though. */
459 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
460 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
462 end = sizeof(pad.buf) / 2;
463 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
464 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
467 static void fortify_test_strcpy(struct kunit *test)
469 struct fortify_padding pad = { };
470 char src[sizeof(pad.buf) + 1] = { };
473 /* Fill 31 bytes with valid characters. */
474 for (i = 0; i < sizeof(src) - 2; i++)
477 /* Destination is %NUL-filled to start with. */
478 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
479 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
480 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
481 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
482 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
484 /* Legitimate strcpy() 1 less than of max size. */
485 KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
487 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
488 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
489 /* Only last byte should be %NUL */
490 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
491 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
492 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
494 src[sizeof(src) - 2] = 'A';
495 /* But now we trip the overflow checking. */
496 KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
498 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
499 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
500 /* Trailing %NUL -- thanks to FORTIFY. */
501 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
502 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
503 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
504 /* And we will not have gone beyond. */
505 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
507 src[sizeof(src) - 1] = 'A';
508 /* And for sure now, two bytes past. */
509 KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
512 * Which trips both the strlen() on the unterminated src,
513 * and the resulting copy attempt.
515 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
516 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
517 /* Trailing %NUL -- thanks to FORTIFY. */
518 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
519 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
520 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
521 /* And we will not have gone beyond. */
522 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
525 static void fortify_test_strncpy(struct kunit *test)
527 struct fortify_padding pad = { };
528 char src[] = "Copy me fully into a small buffer and I will overflow!";
530 /* Destination is %NUL-filled to start with. */
531 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
532 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
533 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
534 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
535 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
537 /* Legitimate strncpy() 1 less than of max size. */
538 KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
539 sizeof(pad.buf) + unconst - 1)
541 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
542 /* Only last byte should be %NUL */
543 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
544 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
545 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
547 /* Legitimate (though unterminated) max-size strncpy. */
548 KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
549 sizeof(pad.buf) + unconst)
551 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
552 /* No trailing %NUL -- thanks strncpy API. */
553 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
554 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
555 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
556 /* But we will not have gone beyond. */
557 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
559 /* Now verify that FORTIFY is working... */
560 KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
561 sizeof(pad.buf) + unconst + 1)
563 /* Should catch the overflow. */
564 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
565 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
566 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
567 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
568 /* And we will not have gone beyond. */
569 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
572 KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
573 sizeof(pad.buf) + unconst + 2)
575 /* Should catch the overflow. */
576 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
577 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
578 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
579 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
580 /* And we will not have gone beyond. */
581 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
584 static void fortify_test_strscpy(struct kunit *test)
586 struct fortify_padding pad = { };
587 char src[] = "Copy me fully into a small buffer and I will overflow!";
589 /* Destination is %NUL-filled to start with. */
590 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
591 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
592 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
593 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
594 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
596 /* Legitimate strscpy() 1 less than of max size. */
597 KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
598 sizeof(pad.buf) + unconst - 1),
600 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
601 /* Keeping space for %NUL, last two bytes should be %NUL */
602 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
603 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
604 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
606 /* Legitimate max-size strscpy. */
607 KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
608 sizeof(pad.buf) + unconst),
610 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
611 /* A trailing %NUL will exist. */
612 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
613 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
614 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
616 /* Now verify that FORTIFY is working... */
617 KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
618 sizeof(pad.buf) + unconst + 1),
620 /* Should catch the overflow. */
621 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
622 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
623 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
624 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
625 /* And we will not have gone beyond. */
626 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
628 /* And much further... */
629 KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
630 sizeof(src) * 2 + unconst),
632 /* Should catch the overflow. */
633 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
634 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
635 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
636 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
637 /* And we will not have gone beyond. */
638 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
641 static void fortify_test_strcat(struct kunit *test)
643 struct fortify_padding pad = { };
644 char src[sizeof(pad.buf) / 2] = { };
649 /* Fill 15 bytes with valid characters. */
650 for (i = 0; i < sizeof(src) - 1; i++)
653 /* Destination is %NUL-filled to start with. */
654 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
655 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
656 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
657 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
658 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
660 /* Legitimate strcat() using less than half max size. */
661 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
662 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
663 /* Legitimate strcat() now 2 bytes shy of end. */
664 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
665 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
666 /* Last two bytes should be %NUL */
667 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
668 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
669 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
671 /* Add one more character to the end. */
672 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
673 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
674 /* Last byte should be %NUL */
675 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
676 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
677 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
679 /* And this one char will overflow. */
680 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
681 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
682 /* Last byte should be %NUL thanks to FORTIFY. */
683 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
684 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
685 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
686 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
688 /* And adding two will overflow more. */
689 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
690 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
691 /* Last byte should be %NUL thanks to FORTIFY. */
692 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
693 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
694 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
695 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
698 static void fortify_test_strncat(struct kunit *test)
700 struct fortify_padding pad = { };
701 char src[sizeof(pad.buf)] = { };
704 /* Fill 31 bytes with valid characters. */
705 partial = sizeof(src) / 2 - 1;
706 for (i = 0; i < partial; i++)
709 /* Destination is %NUL-filled to start with. */
710 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
711 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
712 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
713 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
714 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
716 /* Legitimate strncat() using less than half max size. */
717 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
718 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
719 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
720 /* Legitimate strncat() now 2 bytes shy of end. */
721 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
722 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
723 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
724 /* Last two bytes should be %NUL */
725 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
726 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
727 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
729 /* Add one more character to the end. */
730 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
731 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
732 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
733 /* Last byte should be %NUL */
734 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
735 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
736 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
738 /* And this one char will overflow. */
739 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
740 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
741 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
742 /* Last byte should be %NUL thanks to FORTIFY. */
743 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
744 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
745 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
746 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
748 /* And adding two will overflow more. */
749 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
750 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
751 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
752 /* Last byte should be %NUL thanks to FORTIFY. */
753 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
754 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
755 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
756 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
758 /* Force an unterminated destination, and overflow. */
759 pad.buf[sizeof(pad.buf) - 1] = 'A';
760 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
761 /* This will have tripped both strlen() and strcat(). */
762 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
763 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
764 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
765 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
766 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
767 /* But we should not go beyond the end. */
768 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
771 static void fortify_test_strlcat(struct kunit *test)
773 struct fortify_padding pad = { };
774 char src[sizeof(pad.buf)] = { };
776 int len = sizeof(pad.buf) + unconst;
778 /* Fill 15 bytes with valid characters. */
779 partial = sizeof(src) / 2 - 1;
780 for (i = 0; i < partial; i++)
783 /* Destination is %NUL-filled to start with. */
784 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
785 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
786 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
787 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
788 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
790 /* Legitimate strlcat() using less than half max size. */
791 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
792 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
793 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
794 /* Legitimate strlcat() now 2 bytes shy of end. */
795 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
796 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
797 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
798 /* Last two bytes should be %NUL */
799 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
800 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
801 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
803 /* Add one more character to the end. */
804 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
805 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
806 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
807 /* Last byte should be %NUL */
808 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
809 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
810 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
812 /* And this one char will overflow. */
813 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
814 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
815 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
816 /* Last byte should be %NUL thanks to FORTIFY. */
817 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
818 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
819 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
820 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
822 /* And adding two will overflow more. */
823 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
824 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
825 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
826 /* Last byte should be %NUL thanks to FORTIFY. */
827 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
828 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
829 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
830 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
832 /* Force an unterminated destination, and overflow. */
833 pad.buf[sizeof(pad.buf) - 1] = 'A';
834 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
835 /* This will have tripped both strlen() and strlcat(). */
836 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
837 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
838 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
839 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
840 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
841 /* But we should not go beyond the end. */
842 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
844 /* Force an unterminated source, and overflow. */
845 memset(src, 'B', sizeof(src));
846 pad.buf[sizeof(pad.buf) - 1] = '\0';
847 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
848 /* This will have tripped both strlen() and strlcat(). */
849 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
850 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
851 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
852 /* But we should not go beyond the end. */
853 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
/* Check for 0-sized arrays... */
struct fortify_zero_sized {
	unsigned long bytes_before;
	char buf[0];
	unsigned long bytes_after;
};
863 #define __fortify_test(memfunc) \
864 static void fortify_test_##memfunc(struct kunit *test) \
866 struct fortify_zero_sized zero = { }; \
867 struct fortify_padding pad = { }; \
868 char srcA[sizeof(pad.buf) + 2]; \
869 char srcB[sizeof(pad.buf) + 2]; \
870 size_t len = sizeof(pad.buf) + unconst; \
872 memset(srcA, 'A', sizeof(srcA)); \
873 KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
874 memset(srcB, 'B', sizeof(srcB)); \
875 KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
877 memfunc(pad.buf, srcA, 0 + unconst); \
878 KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
879 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
880 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
881 memfunc(pad.buf + 1, srcB, 1 + unconst); \
882 KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
883 KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
884 KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
885 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
886 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
887 memfunc(pad.buf, srcA, 1 + unconst); \
888 KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
889 KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
890 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
891 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
892 memfunc(pad.buf, srcA, len - 1); \
893 KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
894 KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \
895 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
896 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
897 memfunc(pad.buf, srcA, len); \
898 KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
899 KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \
900 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \
901 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
902 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
903 memfunc(pad.buf, srcA, len + 1); \
904 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
905 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
906 memfunc(pad.buf + 1, srcB, len); \
907 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
908 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \
910 /* Reset error counter. */ \
911 fortify_write_overflows = 0; \
912 /* Copy nothing into nothing: no errors. */ \
913 memfunc(zero.buf, srcB, 0 + unconst); \
914 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
915 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
916 /* We currently explicitly ignore zero-sized dests. */ \
917 memfunc(zero.buf, srcB, 1 + unconst); \
918 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
919 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
921 __fortify_test(memcpy)
922 __fortify_test(memmove)
924 static void fortify_test_memscan(struct kunit *test)
926 char haystack[] = "Where oh where is my memory range?";
927 char *mem = haystack + strlen("Where oh where is ");
929 size_t len = sizeof(haystack) + unconst;
931 KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
933 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
934 /* Catch too-large range. */
935 KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
937 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
938 KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
940 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
943 static void fortify_test_memchr(struct kunit *test)
945 char haystack[] = "Where oh where is my memory range?";
946 char *mem = haystack + strlen("Where oh where is ");
948 size_t len = sizeof(haystack) + unconst;
950 KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
952 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
953 /* Catch too-large range. */
954 KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
956 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
957 KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
959 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
962 static void fortify_test_memchr_inv(struct kunit *test)
964 char haystack[] = "Where oh where is my memory range?";
965 char *mem = haystack + 1;
967 size_t len = sizeof(haystack) + unconst;
969 /* Normal search is okay. */
970 KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
972 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
973 /* Catch too-large range. */
974 KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
976 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
977 KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
979 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
982 static void fortify_test_memcmp(struct kunit *test)
984 char one[] = "My mind is going ...";
985 char two[] = "My mind is going ... I can feel it.";
986 size_t one_len = sizeof(one) + unconst - 1;
987 size_t two_len = sizeof(two) + unconst - 1;
989 /* We match the first string (ignoring the %NUL). */
990 KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
991 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
992 /* Still in bounds, but no longer matching. */
993 KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 1), -32);
994 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
996 /* Catch too-large ranges. */
997 KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
998 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
1000 KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
1001 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
1004 static void fortify_test_kmemdup(struct kunit *test)
1006 char src[] = "I got Doom running on it!";
1008 size_t len = sizeof(src) + unconst;
1010 /* Copy is within bounds. */
1011 copy = kmemdup(src, len, GFP_KERNEL);
1012 KUNIT_EXPECT_NOT_NULL(test, copy);
1013 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1017 copy = kmemdup(src, len - 1, GFP_KERNEL);
1018 KUNIT_EXPECT_NOT_NULL(test, copy);
1019 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1023 copy = kmemdup(src, 1, GFP_KERNEL);
1024 KUNIT_EXPECT_NOT_NULL(test, copy);
1025 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1028 /* Out of bounds by 1 byte. */
1029 copy = kmemdup(src, len + 1, GFP_KERNEL);
1030 KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1031 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
1034 /* Way out of bounds. */
1035 copy = kmemdup(src, len * 2, GFP_KERNEL);
1036 KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1037 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
1040 /* Starting offset causing out of bounds. */
1041 copy = kmemdup(src + 1, len, GFP_KERNEL);
1042 KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1043 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
1047 static int fortify_test_init(struct kunit *test)
1049 if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
1050 kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");
1052 fortify_read_overflows = 0;
1053 kunit_add_named_resource(test, NULL, NULL, &read_resource,
1054 "fortify_read_overflows",
1055 &fortify_read_overflows);
1056 fortify_write_overflows = 0;
1057 kunit_add_named_resource(test, NULL, NULL, &write_resource,
1058 "fortify_write_overflows",
1059 &fortify_write_overflows);
1063 static struct kunit_case fortify_test_cases[] = {
1064 KUNIT_CASE(fortify_test_known_sizes),
1065 KUNIT_CASE(fortify_test_control_flow_split),
1066 KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
1067 KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
1068 KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
1069 KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
1070 KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
1071 KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
1072 KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
1073 KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
1074 KUNIT_CASE(fortify_test_realloc_size),
1075 KUNIT_CASE(fortify_test_strlen),
1076 KUNIT_CASE(fortify_test_strnlen),
1077 KUNIT_CASE(fortify_test_strcpy),
1078 KUNIT_CASE(fortify_test_strncpy),
1079 KUNIT_CASE(fortify_test_strscpy),
1080 KUNIT_CASE(fortify_test_strcat),
1081 KUNIT_CASE(fortify_test_strncat),
1082 KUNIT_CASE(fortify_test_strlcat),
1083 /* skip memset: performs bounds checking on whole structs */
1084 KUNIT_CASE(fortify_test_memcpy),
1085 KUNIT_CASE(fortify_test_memmove),
1086 KUNIT_CASE(fortify_test_memscan),
1087 KUNIT_CASE(fortify_test_memchr),
1088 KUNIT_CASE(fortify_test_memchr_inv),
1089 KUNIT_CASE(fortify_test_memcmp),
1090 KUNIT_CASE(fortify_test_kmemdup),
1094 static struct kunit_suite fortify_test_suite = {
1096 .init = fortify_test_init,
1097 .test_cases = fortify_test_cases,
1100 kunit_test_suite(fortify_test_suite);
1102 MODULE_LICENSE("GPL");