1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
4 * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
5 *
6 * For corner cases with UBSAN, try testing with:
7 *
8 * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
9 * --kconfig_add CONFIG_FORTIFY_SOURCE=y \
10 * --kconfig_add CONFIG_UBSAN=y \
11 * --kconfig_add CONFIG_UBSAN_TRAP=y \
12 * --kconfig_add CONFIG_UBSAN_BOUNDS=y \
13 * --kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
14 * --make_options LLVM=1 fortify
15 */
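/*
 * A FORTIFY-only pass (without the UBSAN options) should be possible with
 * something along these lines (illustrative invocation, adjust as needed):
 *
 *	./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *		--kconfig_add CONFIG_FORTIFY_SOURCE=y fortify
 */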
16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18/* We don't need to fill dmesg with the fortify WARNs during testing. */
19#ifdef DEBUG
20# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
21# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x)
22#else
23# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
24# define FORTIFY_WARN_KUNIT(x...) do { } while (0)
25#endif
26
27/* Redefine fortify_panic() to track failures. */
28void fortify_add_kunit_error(int write);
29#define fortify_panic(func, write, avail, size, retfail) do { \
30 FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size); \
31 fortify_add_kunit_error(write); \
32 return (retfail); \
33} while (0)
34
35/* Redefine fortify_warn_once() to track memcpy() failures. */
36#define fortify_warn_once(chk_func, x...) do { \
37 bool __result = chk_func; \
38 FORTIFY_WARN_KUNIT(__result, x); \
39 if (__result) \
40 fortify_add_kunit_error(1); \
41} while (0)
42
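/*
 * The overrides above must be in place before the includes below pull in
 * the fortified string helpers (via <linux/string.h>), so that overflows
 * detected by FORTIFY are routed into the KUnit counters instead of the
 * normal reporting paths.
 */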
43#include <kunit/device.h>
44#include <kunit/test.h>
45#include <kunit/test-bug.h>
46#include <linux/device.h>
47#include <linux/slab.h>
48#include <linux/string.h>
49#include <linux/vmalloc.h>
50
51/* Handle being built without CONFIG_FORTIFY_SOURCE */
52#ifndef __compiletime_strlen
53# define __compiletime_strlen __builtin_strlen
54#endif
55
56static struct kunit_resource read_resource;
57static struct kunit_resource write_resource;
58static int fortify_read_overflows;
59static int fortify_write_overflows;
60
61static const char array_of_10[] = "this is 10";
62static const char *ptr_of_11 = "this is 11!";
63static char array_unknown[] = "compiler thinks I might change";
64
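/*
 * Called from the fortify_panic()/fortify_warn_once() overrides above:
 * bumps the per-test read or write overflow counter that was registered
 * as a named KUnit resource in fortify_test_init().
 */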
65void fortify_add_kunit_error(int write)
66{
67 struct kunit_resource *resource;
68 struct kunit *current_test;
69
70 current_test = kunit_get_current_test();
71 if (!current_test)
72 return;
73
74 resource = kunit_find_named_resource(current_test,
75 write ? "fortify_write_overflows"
76 : "fortify_read_overflows");
77 if (!resource)
78 return;
79
80 (*(int *)resource->data)++;
81 kunit_put_resource(resource);
82}
83
84static void fortify_test_known_sizes(struct kunit *test)
85{
86 KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
87 KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
88 KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
89
90 KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
91 /* Externally defined and dynamically sized string pointer: */
92 KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
93}
94
95/* This is volatile so the optimizer can't perform DCE below. */
96static volatile int pick;
97
98/* Not inline to keep optimizer from figuring out which string we want. */
99static noinline size_t want_minus_one(int pick)
100{
101 const char *str;
102
103 switch (pick) {
104 case 1:
105 str = "4444";
106 break;
107 case 2:
108 str = "333";
109 break;
110 default:
111 str = "1";
112 break;
113 }
114 return __compiletime_strlen(str);
115}
116
117static void fortify_test_control_flow_split(struct kunit *test)
118{
119 KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
120}
121
122#define KUNIT_EXPECT_BOS(test, p, expected, name) \
123 KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1), \
124 expected, \
125 "__alloc_size() not working with __bos on " name "\n")
126
127#if !__has_builtin(__builtin_dynamic_object_size)
128#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
129 /* Silence "unused variable 'expected'" warning. */ \
130 KUNIT_EXPECT_EQ(test, expected, expected)
131#else
132#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
133 KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1), \
134 expected, \
135 "__alloc_size() not working with __bdos on " name "\n")
136#endif
137
138/* If the expected size is a constant value, __bos can see it. */
139#define check_const(_expected, alloc, free) do { \
140 size_t expected = (_expected); \
141 void *p = alloc; \
142 KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
143 KUNIT_EXPECT_BOS(test, p, expected, #alloc); \
144 KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
145 free; \
146} while (0)
147
148/* If the expected size is NOT a constant value, __bos CANNOT see it. */
149#define check_dynamic(_expected, alloc, free) do { \
150 size_t expected = (_expected); \
151 void *p = alloc; \
152 KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
153 KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc); \
154 KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
155 free; \
156} while (0)
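/*
 * Roughly, for an __alloc_size()-annotated allocator (illustrative only;
 * "runtime" stands for any non-constant size):
 *
 *	p = kmalloc(128, gfp);     // __bos(p, 1) == 128, __bdos(p, 1) == 128
 *	p = kmalloc(runtime, gfp); // __bos(p, 1) == SIZE_MAX, __bdos(p, 1) == runtime
 *
 * which is what check_const() and check_dynamic() verify, respectively.
 */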
157
158/* Assortment of constant-value kinda-edge cases. */
159#define CONST_TEST_BODY(TEST_alloc) do { \
160 /* Special-case vmalloc()-family to skip 0-sized allocs. */ \
161 if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0) \
162 TEST_alloc(check_const, 0, 0); \
163 TEST_alloc(check_const, 1, 1); \
164 TEST_alloc(check_const, 128, 128); \
165 TEST_alloc(check_const, 1023, 1023); \
166 TEST_alloc(check_const, 1025, 1025); \
167 TEST_alloc(check_const, 4096, 4096); \
168 TEST_alloc(check_const, 4097, 4097); \
169} while (0)
170
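/* Volatile so the compiler cannot treat these sizes as compile-time constants. */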
171static volatile size_t zero_size;
172static volatile size_t unknown_size = 50;
173
174#if !__has_builtin(__builtin_dynamic_object_size)
175#define DYNAMIC_TEST_BODY(TEST_alloc) \
176 kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
177#else
178#define DYNAMIC_TEST_BODY(TEST_alloc) do { \
179 size_t size = unknown_size; \
180 \
181 /* \
182 * Expected size is "size" in each check, and "size" is then \
183 * incremented as a side effect within each check. Requires we disable \
184 * -Wunsequenced. \
185 */ \
186 TEST_alloc(check_dynamic, size, size++); \
187 /* Make sure incrementing actually happened. */ \
188 KUNIT_EXPECT_NE(test, size, unknown_size); \
189} while (0)
190#endif
191
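/*
 * Emit a fortify_test_alloc_size_<allocator>_const and a matching _dynamic
 * KUnit case from the corresponding TEST_<allocator> body.
 */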
192#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
193static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
194{ \
195 CONST_TEST_BODY(TEST_##allocator); \
196} \
197static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
198{ \
199 DYNAMIC_TEST_BODY(TEST_##allocator); \
200}
201
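/*
 * Exercise the kmalloc()-family entry points (including the krealloc()
 * and kmemdup() paths) against the given size checker.
 */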
202#define TEST_kmalloc(checker, expected_size, alloc_size) do { \
203 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
204 void *orig; \
205 size_t len; \
206 \
207 checker(expected_size, kmalloc(alloc_size, gfp), \
208 kfree(p)); \
209 checker(expected_size, \
210 kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
211 kfree(p)); \
212 checker(expected_size, kzalloc(alloc_size, gfp), \
213 kfree(p)); \
214 checker(expected_size, \
215 kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
216 kfree(p)); \
217 checker(expected_size, kcalloc(1, alloc_size, gfp), \
218 kfree(p)); \
219 checker(expected_size, kcalloc(alloc_size, 1, gfp), \
220 kfree(p)); \
221 checker(expected_size, \
222 kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
223 kfree(p)); \
224 checker(expected_size, \
225 kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
226 kfree(p)); \
227 checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
228 kfree(p)); \
229 checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
230 kfree(p)); \
231 checker(expected_size, \
232 kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
233 kfree(p)); \
234 checker(expected_size, \
235 kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
236 kfree(p)); \
237 \
238 orig = kmalloc(alloc_size, gfp); \
239 KUNIT_EXPECT_TRUE(test, orig != NULL); \
240 checker((expected_size) * 2, \
241 krealloc(orig, (alloc_size) * 2, gfp), \
242 kfree(p)); \
243 orig = kmalloc(alloc_size, gfp); \
244 KUNIT_EXPECT_TRUE(test, orig != NULL); \
245 checker((expected_size) * 2, \
246 krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
247 kfree(p)); \
248 orig = kmalloc(alloc_size, gfp); \
249 KUNIT_EXPECT_TRUE(test, orig != NULL); \
250 checker((expected_size) * 2, \
251 krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
252 kfree(p)); \
253 \
254 len = 11; \
255 /* Using memdup() with fixed size, so force unknown length. */ \
256 if (!__builtin_constant_p(expected_size)) \
257 len += zero_size; \
258 checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
259} while (0)
260DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
261
262/* Sizes are in pages, not bytes. */
263#define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
264 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
265 checker((expected_pages) * PAGE_SIZE, \
266 vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
267 checker((expected_pages) * PAGE_SIZE, \
268 vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
269 checker((expected_pages) * PAGE_SIZE, \
270 __vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
271} while (0)
272DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
273
274/* Sizes are in pages (and open-coded for side-effects), not bytes. */
275#define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
276 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
277 size_t prev_size; \
278 void *orig; \
279 \
280 checker((expected_pages) * PAGE_SIZE, \
281 kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
282 kvfree(p)); \
283 checker((expected_pages) * PAGE_SIZE, \
284 kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
285 kvfree(p)); \
286 checker((expected_pages) * PAGE_SIZE, \
287 kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
288 kvfree(p)); \
289 checker((expected_pages) * PAGE_SIZE, \
290 kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
291 kvfree(p)); \
292 checker((expected_pages) * PAGE_SIZE, \
293 kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
294 kvfree(p)); \
295 checker((expected_pages) * PAGE_SIZE, \
296 kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
297 kvfree(p)); \
298 checker((expected_pages) * PAGE_SIZE, \
299 kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
300 kvfree(p)); \
301 checker((expected_pages) * PAGE_SIZE, \
302 kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
303 kvfree(p)); \
304 \
305 prev_size = (expected_pages) * PAGE_SIZE; \
306 orig = kvmalloc(prev_size, gfp); \
307 KUNIT_EXPECT_TRUE(test, orig != NULL); \
308 checker(((expected_pages) * PAGE_SIZE) * 2, \
309 kvrealloc(orig, prev_size, \
310 ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
311 kvfree(p)); \
312} while (0)
313DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
314
315#define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
316 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
317 const char dev_name[] = "fortify-test"; \
318 struct device *dev; \
319 void *orig; \
320 size_t len; \
321 \
322 /* Create dummy device for devm_kmalloc()-family tests. */ \
323 dev = kunit_device_register(test, dev_name); \
324 KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
325 "Cannot register test device\n"); \
326 \
327 checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
328 devm_kfree(dev, p)); \
329 checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
330 devm_kfree(dev, p)); \
331 checker(expected_size, \
332 devm_kmalloc_array(dev, 1, alloc_size, gfp), \
333 devm_kfree(dev, p)); \
334 checker(expected_size, \
335 devm_kmalloc_array(dev, alloc_size, 1, gfp), \
336 devm_kfree(dev, p)); \
337 checker(expected_size, \
338 devm_kcalloc(dev, 1, alloc_size, gfp), \
339 devm_kfree(dev, p)); \
340 checker(expected_size, \
341 devm_kcalloc(dev, alloc_size, 1, gfp), \
342 devm_kfree(dev, p)); \
343 \
344 orig = devm_kmalloc(dev, alloc_size, gfp); \
345 KUNIT_EXPECT_TRUE(test, orig != NULL); \
346 checker((expected_size) * 2, \
347 devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
348 devm_kfree(dev, p)); \
349 \
350 len = 4; \
351 /* Using memdup() with fixed size, so force unknown length. */ \
352 if (!__builtin_constant_p(expected_size)) \
353 len += zero_size; \
354 checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
355 devm_kfree(dev, p)); \
356 \
357 kunit_device_unregister(test, dev); \
358} while (0)
359DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
360
361static const char * const test_strs[] = {
362 "",
363 "Hello there",
364 "A longer string, just for variety",
365};
366
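/*
 * Duplicating runtime-length strings: only __bdos() can know the size,
 * so every check here uses check_dynamic().
 */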
367#define TEST_realloc(checker) do { \
368 gfp_t gfp = GFP_KERNEL; \
369 size_t len; \
370 int i; \
371 \
372 for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \
373 len = strlen(test_strs[i]); \
374 KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \
375 checker(len, kmemdup_array(test_strs[i], 1, len, gfp), \
376 kfree(p)); \
377 checker(len, kmemdup(test_strs[i], len, gfp), \
378 kfree(p)); \
379 } \
380} while (0)
381static void fortify_test_realloc_size(struct kunit *test)
382{
383 TEST_realloc(check_dynamic);
384}
385
386/*
387 * We can't have an array at the end of a structure or else
388 * builds without -fstrict-flex-arrays=3 will treat it as
389 * having an unknown length. Additionally, add bytes before
390 * and after the string to catch over/underflows if tests
391 * fail.
392 */
393struct fortify_padding {
394 unsigned long bytes_before;
395 char buf[32];
396 unsigned long bytes_after;
397};
398/* Force compiler into not being able to resolve size at compile-time. */
399static volatile int unconst;
400
401static void fortify_test_strlen(struct kunit *test)
402{
403 struct fortify_padding pad = { };
404 int i, end = sizeof(pad.buf) - 1;
405
406 /* Fill 31 bytes with valid characters. */
407 for (i = 0; i < sizeof(pad.buf) - 1; i++)
408 pad.buf[i] = i + '0';
409 /* Trailing bytes are still %NUL. */
410 KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
411 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
412
413 /* String is terminated, so strlen() is valid. */
414 KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
415 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
416
417 /* Make string unterminated, and recount. */
418 pad.buf[end] = 'A';
419 end = sizeof(pad.buf);
420 KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
421 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
422}
423
424static void fortify_test_strnlen(struct kunit *test)
425{
426 struct fortify_padding pad = { };
427 int i, end = sizeof(pad.buf) - 1;
428
429 /* Fill 31 bytes with valid characters. */
430 for (i = 0; i < sizeof(pad.buf) - 1; i++)
431 pad.buf[i] = i + '0';
432 /* Trailing bytes are still %NUL. */
433 KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
434 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
435
436 /* String is terminated, so strnlen() is valid. */
437 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
438 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
439 /* A truncated strnlen() will be safe, too. */
440 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
441 sizeof(pad.buf) / 2);
442 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
443
444 /* Make string unterminated, and recount. */
445 pad.buf[end] = 'A';
446 end = sizeof(pad.buf);
447 /* Reading beyond with strnlen() will fail. */
448 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
449 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
450 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
451 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
452
453 /* Early-truncated is safe still, though. */
454 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
455 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
456
457 end = sizeof(pad.buf) / 2;
458 KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
459 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
460}
461
462static void fortify_test_strcpy(struct kunit *test)
463{
464 struct fortify_padding pad = { };
465 char src[sizeof(pad.buf) + 1] = { };
466 int i;
467
468 /* Fill 31 bytes with valid characters. */
469 for (i = 0; i < sizeof(src) - 2; i++)
470 src[i] = i + '0';
471
472 /* Destination is %NUL-filled to start with. */
473 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
474 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
475 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
476 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
477 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
478
479 /* Legitimate strcpy() using 1 less than the max size. */
480 KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
481 == pad.buf);
482 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
483 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
484 /* Only last byte should be %NUL */
485 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
486 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
487 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
488
489 src[sizeof(src) - 2] = 'A';
490 /* But now we trip the overflow checking. */
491 KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
492 == pad.buf);
493 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
494 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
495 /* Trailing %NUL -- thanks to FORTIFY. */
496 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
497 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
498 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
499 /* And we will not have gone beyond. */
500 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
501
502 src[sizeof(src) - 1] = 'A';
503 /* And for sure now, two bytes past. */
504 KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
505 == pad.buf);
506 /*
507 * Which trips both the strlen() on the unterminated src,
508 * and the resulting copy attempt.
509 */
510 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
511 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
512 /* Trailing %NUL -- thanks to FORTIFY. */
513 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
514 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
515 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
516 /* And we will not have gone beyond. */
517 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
518}
519
520static void fortify_test_strncpy(struct kunit *test)
521{
522 struct fortify_padding pad = { };
523 char src[] = "Copy me fully into a small buffer and I will overflow!";
524
525 /* Destination is %NUL-filled to start with. */
526 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
527 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
528 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
529 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
530 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
531
532 /* Legitimate strncpy() using 1 less than the max size. */
533 KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
534 sizeof(pad.buf) + unconst - 1)
535 == pad.buf);
536 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
537 /* Only last byte should be %NUL */
538 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
539 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
540 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
541
542 /* Legitimate (though unterminated) max-size strncpy. */
543 KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
544 sizeof(pad.buf) + unconst)
545 == pad.buf);
546 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
547 /* No trailing %NUL -- thanks strncpy API. */
548 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
549 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
550 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
551 /* But we will not have gone beyond. */
552 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
553
554 /* Now verify that FORTIFY is working... */
555 KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
556 sizeof(pad.buf) + unconst + 1)
557 == pad.buf);
558 /* Should catch the overflow. */
559 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
560 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
561 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
562 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
563 /* And we will not have gone beyond. */
564 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
565
566 /* And further... */
567 KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
568 sizeof(pad.buf) + unconst + 2)
569 == pad.buf);
570 /* Should catch the overflow. */
571 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
572 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
573 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
574 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
575 /* And we will not have gone beyond. */
576 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
577}
578
579static void fortify_test_strscpy(struct kunit *test)
580{
581 struct fortify_padding pad = { };
582 char src[] = "Copy me fully into a small buffer and I will overflow!";
583
584 /* Destination is %NUL-filled to start with. */
585 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
586 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
587 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
588 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
589 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
590
591 /* Legitimate strscpy() using 1 less than the max size. */
592 KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
593 sizeof(pad.buf) + unconst - 1),
594 -E2BIG);
595 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
596 /* Keeping space for %NUL, last two bytes should be %NUL */
597 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
598 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
599 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
600
601 /* Legitimate max-size strscpy. */
602 KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
603 sizeof(pad.buf) + unconst),
604 -E2BIG);
605 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
606 /* A trailing %NUL will exist. */
607 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
608 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
609 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
610
611 /* Now verify that FORTIFY is working... */
612 KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
613 sizeof(pad.buf) + unconst + 1),
614 -E2BIG);
615 /* Should catch the overflow. */
616 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
617 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
618 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
619 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
620 /* And we will not have gone beyond. */
621 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
622
623 /* And much further... */
624 KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
625 sizeof(src) * 2 + unconst),
626 -E2BIG);
627 /* Should catch the overflow. */
628 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
629 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
630 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
631 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
632 /* And we will not have gone beyond. */
633 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
634}
635
636static void fortify_test_strcat(struct kunit *test)
637{
638 struct fortify_padding pad = { };
639 char src[sizeof(pad.buf) / 2] = { };
640 char one[] = "A";
641 char two[] = "BC";
642 int i;
643
644 /* Fill 15 bytes with valid characters. */
645 for (i = 0; i < sizeof(src) - 1; i++)
646 src[i] = i + 'A';
647
648 /* Destination is %NUL-filled to start with. */
649 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
650 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
651 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
652 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
653 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
654
655 /* Legitimate strcat() using less than half max size. */
656 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
657 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
658 /* Legitimate strcat() now 2 bytes shy of end. */
659 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
660 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
661 /* Last two bytes should be %NUL */
662 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
663 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
664 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
665
666 /* Add one more character to the end. */
667 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
668 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
669 /* Last byte should be %NUL */
670 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
671 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
672 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
673
674 /* And this one char will overflow. */
675 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
676 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
677 /* Last byte should be %NUL thanks to FORTIFY. */
678 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
679 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
680 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
681 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
682
683 /* And adding two will overflow more. */
684 KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
685 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
686 /* Last byte should be %NUL thanks to FORTIFY. */
687 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
688 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
689 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
690 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
691}
692
693static void fortify_test_strncat(struct kunit *test)
694{
695 struct fortify_padding pad = { };
696 char src[sizeof(pad.buf)] = { };
697 int i, partial;
698
699 /* Fill 15 bytes with valid characters. */
700 partial = sizeof(src) / 2 - 1;
701 for (i = 0; i < partial; i++)
702 src[i] = i + 'A';
703
704 /* Destination is %NUL-filled to start with. */
705 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
706 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
707 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
708 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
709 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
710
711 /* Legitimate strncat() using less than half max size. */
712 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
713 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
714 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
715 /* Legitimate strncat() now 2 bytes shy of end. */
716 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
717 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
718 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
719 /* Last two bytes should be %NUL */
720 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
721 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
722 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
723
724 /* Add one more character to the end. */
725 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
726 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
727 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
728 /* Last byte should be %NUL */
729 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
730 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
731 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
732
733 /* And this one char will overflow. */
734 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
735 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
736 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
737 /* Last byte should be %NUL thanks to FORTIFY. */
738 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
739 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
740 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
741 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
742
743 /* And adding two will overflow more. */
744 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
745 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
746 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
747 /* Last byte should be %NUL thanks to FORTIFY. */
748 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
749 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
750 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
751 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
752
753 /* Force an unterminated destination, and overflow. */
754 pad.buf[sizeof(pad.buf) - 1] = 'A';
755 KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
756 /* This will have tripped both strlen() and strcat(). */
757 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
758 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
759 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
760 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
761 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
762 /* But we should not go beyond the end. */
763 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
764}
765
766static void fortify_test_strlcat(struct kunit *test)
767{
768 struct fortify_padding pad = { };
769 char src[sizeof(pad.buf)] = { };
770 int i, partial;
771 int len = sizeof(pad.buf) + unconst;
772
773 /* Fill 15 bytes with valid characters. */
774 partial = sizeof(src) / 2 - 1;
775 for (i = 0; i < partial; i++)
776 src[i] = i + 'A';
777
778 /* Destination is %NUL-filled to start with. */
779 KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
780 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
781 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
782 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
783 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
784
785 /* Legitimate strlcat() using less than half max size. */
786 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
787 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
788 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
789 /* Legitimate strlcat() now 2 bytes shy of end. */
790 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
791 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
792 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
793 /* Last two bytes should be %NUL */
794 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
795 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
796 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
797
798 /* Add one more character to the end. */
799 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
800 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
801 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
802 /* Last byte should be %NUL */
803 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
804 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
805 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
806
807 /* And this one char will overflow. */
808 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
809 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
810 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
811 /* Last byte should be %NUL thanks to FORTIFY. */
812 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
813 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
814 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
815 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
816
817 /* And adding two will overflow more. */
818 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
819 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
820 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
821 /* Last byte should be %NUL thanks to FORTIFY. */
822 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
823 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
824 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
825 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
826
827 /* Force an unterminated destination, and overflow. */
828 pad.buf[sizeof(pad.buf) - 1] = 'A';
829 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
830 /* This will have tripped both strlen() and strlcat(). */
831 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
832 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
833 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
834 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
835 KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
836 /* But we should not go beyond the end. */
837 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
838
839 /* Force an unterminated source, and overflow. */
840 memset(src, 'B', sizeof(src));
841 pad.buf[sizeof(pad.buf) - 1] = '\0';
842 KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
843 /* This will have tripped both strlen() and strlcat(). */
844 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
845 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
846 KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
847 /* But we should not go beyond the end. */
848 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
849}
850
851/* Check for 0-sized arrays... */
852struct fortify_zero_sized {
853 unsigned long bytes_before;
854 char buf[0];
855 unsigned long bytes_after;
856};
857
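/*
 * Shared body for the memcpy()/memmove() bounds-checking tests,
 * instantiated for each function by the __fortify_test() invocations below.
 */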
858#define __fortify_test(memfunc) \
859static void fortify_test_##memfunc(struct kunit *test) \
860{ \
861 struct fortify_zero_sized zero = { }; \
862 struct fortify_padding pad = { }; \
863 char srcA[sizeof(pad.buf) + 2]; \
864 char srcB[sizeof(pad.buf) + 2]; \
865 size_t len = sizeof(pad.buf) + unconst; \
866 \
867 memset(srcA, 'A', sizeof(srcA)); \
868 KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
869 memset(srcB, 'B', sizeof(srcB)); \
870 KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
871 \
872 memfunc(pad.buf, srcA, 0 + unconst); \
873 KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
874 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
875 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
876 memfunc(pad.buf + 1, srcB, 1 + unconst); \
877 KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
878 KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
879 KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
880 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
881 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
882 memfunc(pad.buf, srcA, 1 + unconst); \
883 KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
884 KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
885 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
886 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
887 memfunc(pad.buf, srcA, len - 1); \
888 KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
889 KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \
890 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
891 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
892 memfunc(pad.buf, srcA, len); \
893 KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
894 KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \
895 KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \
896 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
897 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
898 memfunc(pad.buf, srcA, len + 1); \
899 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
900 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
901 memfunc(pad.buf + 1, srcB, len); \
902 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
903 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \
904 \
905 /* Reset error counter. */ \
906 fortify_write_overflows = 0; \
907 /* Copy nothing into nothing: no errors. */ \
908 memfunc(zero.buf, srcB, 0 + unconst); \
909 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
910 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
911 memfunc(zero.buf, srcB, 1 + unconst); \
912 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
913 KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
914}
915__fortify_test(memcpy)
916__fortify_test(memmove)
917
918static void fortify_test_memscan(struct kunit *test)
919{
920 char haystack[] = "Where oh where is my memory range?";
921 char *mem = haystack + strlen("Where oh where is ");
922 char needle = 'm';
923 size_t len = sizeof(haystack) + unconst;
924
925 KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
926 mem);
927 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
928 /* Catch too-large range. */
929 KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
930 NULL);
931 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
932 KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
933 NULL);
934 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
935}
936
937static void fortify_test_memchr(struct kunit *test)
938{
939 char haystack[] = "Where oh where is my memory range?";
940 char *mem = haystack + strlen("Where oh where is ");
941 char needle = 'm';
942 size_t len = sizeof(haystack) + unconst;
943
944 KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
945 mem);
946 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
947 /* Catch too-large range. */
948 KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
949 NULL);
950 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
951 KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
952 NULL);
953 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
954}
955
956static void fortify_test_memchr_inv(struct kunit *test)
957{
958 char haystack[] = "Where oh where is my memory range?";
959 char *mem = haystack + 1;
960 char needle = 'W';
961 size_t len = sizeof(haystack) + unconst;
962
963 /* Normal search is okay. */
964 KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
965 mem);
966 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
967 /* Catch too-large range. */
968 KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
969 NULL);
970 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
971 KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
972 NULL);
973 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
974}
975
976static void fortify_test_memcmp(struct kunit *test)
977{
978 char one[] = "My mind is going ...";
979 char two[] = "My mind is going ... I can feel it.";
980 size_t one_len = sizeof(one) + unconst - 1;
981 size_t two_len = sizeof(two) + unconst - 1;
982
983 /* We match the first string (ignoring the %NUL). */
984 KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
985 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
986 /* Still in bounds, but no longer matching. */
987 KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0);
988 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
989
990 /* Catch too-large ranges. */
991 KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
992 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
993
994 KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
995 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
996}
997
998static void fortify_test_kmemdup(struct kunit *test)
999{
1000 char src[] = "I got Doom running on it!";
1001 char *copy;
1002 size_t len = sizeof(src) + unconst;
1003
1004 /* Copy is within bounds. */
1005 copy = kmemdup(src, len, GFP_KERNEL);
1006 KUNIT_EXPECT_NOT_NULL(test, copy);
1007 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1008 kfree(copy);
1009
1010 /* Without %NUL. */
1011 copy = kmemdup(src, len - 1, GFP_KERNEL);
1012 KUNIT_EXPECT_NOT_NULL(test, copy);
1013 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1014 kfree(copy);
1015
1016 /* Tiny bounds. */
1017 copy = kmemdup(src, 1, GFP_KERNEL);
1018 KUNIT_EXPECT_NOT_NULL(test, copy);
1019 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
1020 kfree(copy);
1021
1022 /* Out of bounds by 1 byte. */
1023 copy = kmemdup(src, len + 1, GFP_KERNEL);
1024 KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1025 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
1026 kfree(copy);
1027
1028 /* Way out of bounds. */
1029 copy = kmemdup(src, len * 2, GFP_KERNEL);
1030 KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1031 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
1032 kfree(copy);
1033
1034 /* Starting offset causing out of bounds. */
1035 copy = kmemdup(src + 1, len, GFP_KERNEL);
1036 KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
1037 KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
1038 kfree(copy);
1039}
1040
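/*
 * Skip the suite when FORTIFY_SOURCE is disabled, then reset and register
 * the overflow counters for the test case about to run.
 */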
1041static int fortify_test_init(struct kunit *test)
1042{
1043 if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
1044 kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");
1045
1046 fortify_read_overflows = 0;
1047 kunit_add_named_resource(test, NULL, NULL, &read_resource,
1048 "fortify_read_overflows",
1049 &fortify_read_overflows);
1050 fortify_write_overflows = 0;
1051 kunit_add_named_resource(test, NULL, NULL, &write_resource,
1052 "fortify_write_overflows",
1053 &fortify_write_overflows);
1054 return 0;
1055}
1056
1057static struct kunit_case fortify_test_cases[] = {
1058 KUNIT_CASE(fortify_test_known_sizes),
1059 KUNIT_CASE(fortify_test_control_flow_split),
1060 KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
1061 KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
1062 KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
1063 KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
1064 KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
1065 KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
1066 KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
1067 KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
1068 KUNIT_CASE(fortify_test_realloc_size),
1069 KUNIT_CASE(fortify_test_strlen),
1070 KUNIT_CASE(fortify_test_strnlen),
1071 KUNIT_CASE(fortify_test_strcpy),
1072 KUNIT_CASE(fortify_test_strncpy),
1073 KUNIT_CASE(fortify_test_strscpy),
1074 KUNIT_CASE(fortify_test_strcat),
1075 KUNIT_CASE(fortify_test_strncat),
1076 KUNIT_CASE(fortify_test_strlcat),
1077 /* skip memset: performs bounds checking on whole structs */
1078 KUNIT_CASE(fortify_test_memcpy),
1079 KUNIT_CASE(fortify_test_memmove),
1080 KUNIT_CASE(fortify_test_memscan),
1081 KUNIT_CASE(fortify_test_memchr),
1082 KUNIT_CASE(fortify_test_memchr_inv),
1083 KUNIT_CASE(fortify_test_memcmp),
1084 KUNIT_CASE(fortify_test_kmemdup),
1085 {}
1086};
1087
1088static struct kunit_suite fortify_test_suite = {
1089 .name = "fortify",
1090 .init = fortify_test_init,
1091 .test_cases = fortify_test_cases,
1092};
1093
1094kunit_test_suite(fortify_test_suite);
1095
1096MODULE_DESCRIPTION("Runtime test cases for CONFIG_FORTIFY_SOURCE");
1097MODULE_LICENSE("GPL");