Commit | Line | Data |
---|---|---|
1fe84fd4 ME |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * KCSAN test with various race scenarios to test runtime behaviour. Since the | |
4 | * interface with which KCSAN's reports are obtained is via the console, this is | |
5 | * the output we should verify. For each test case checks the presence (or | |
6 | * absence) of generated reports. Relies on 'console' tracepoint to capture | |
7 | * reports as they appear in the kernel log. | |
8 | * | |
9 | * Makes use of KUnit for test organization, and the Torture framework for test | |
10 | * thread control. | |
11 | * | |
12 | * Copyright (C) 2020, Google LLC. | |
13 | * Author: Marco Elver <elver@google.com> | |
14 | */ | |
15 | ||
f6a14914 ME |
16 | #define pr_fmt(fmt) "kcsan_test: " fmt |
17 | ||
1fe84fd4 ME |
18 | #include <kunit/test.h> |
19 | #include <linux/jiffies.h> | |
20 | #include <linux/kcsan-checks.h> | |
21 | #include <linux/kernel.h> | |
22 | #include <linux/sched.h> | |
23 | #include <linux/seqlock.h> | |
24 | #include <linux/spinlock.h> | |
25 | #include <linux/string.h> | |
26 | #include <linux/timer.h> | |
27 | #include <linux/torture.h> | |
28 | #include <linux/tracepoint.h> | |
29 | #include <linux/types.h> | |
30 | #include <trace/events/printk.h> | |
31 | ||
/*
 * Expected access type for a compound read-write: only compilers that support
 * distinguishing compound (read-then-write) instrumentation report
 * KCSAN_ACCESS_COMPOUND; otherwise fall back to the provided alternative.
 */
#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
#define __KCSAN_ACCESS_RW(alt) (alt)
#endif
37 | ||
/* Points to current test-case memory access "kernels". */
static void (*access_kernels[2])(void);

static struct task_struct **threads; /* Lists of threads. */
static unsigned long end_time; /* End time of test. */

/*
 * Report as observed from console; filled in by the 'console' tracepoint
 * probe (see probe_console()).
 */
static struct {
	spinlock_t lock;	/* Serializes updates of @nlines/@lines. */
	int nlines;		/* Number of valid lines captured so far. */
	char lines[3][512];	/* Title line + the two access-info lines. */
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};
52 | ||
53 | /* Setup test checking loop. */ | |
2888557f | 54 | static __no_kcsan inline void |
1fe84fd4 ME |
55 | begin_test_checks(void (*func1)(void), void (*func2)(void)) |
56 | { | |
57 | kcsan_disable_current(); | |
58 | ||
59 | /* | |
60 | * Require at least as long as KCSAN_REPORT_ONCE_IN_MS, to ensure at | |
61 | * least one race is reported. | |
62 | */ | |
63 | end_time = jiffies + msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS + 500); | |
64 | ||
65 | /* Signal start; release potential initialization of shared data. */ | |
66 | smp_store_release(&access_kernels[0], func1); | |
67 | smp_store_release(&access_kernels[1], func2); | |
68 | } | |
69 | ||
/*
 * End test checking loop: returns true once the test may stop, i.e. @stop was
 * requested or the test duration elapsed. Re-enables KCSAN for the current
 * task before returning true.
 */
static __no_kcsan inline bool
end_test_checks(bool stop)
{
	if (!stop && time_before(jiffies, end_time)) {
		/* Continue checking */
		might_sleep();
		return false;
	}

	kcsan_enable_current();
	return true;
}
83 | ||
/*
 * Probe for console output: checks if a race was reported, and obtains observed
 * lines of interest. Attached to the 'console' tracepoint.
 */
__no_kcsan
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	/*
	 * Note that KCSAN reports under a global lock, so we do not risk the
	 * possibility of having multiple reports interleaved. If that were the
	 * case, we'd expect tests to fail.
	 */

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KCSAN: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * KCSAN report and related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if ((nlines == 1 || nlines == 2) && strnstr(buf, "bytes by", len)) {
		/* Access-info line following a captured title line. */
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));

		if (strnstr(buf, "race at unknown origin", len)) {
			if (WARN_ON(nlines != 2))
				goto out;

			/* No second line of interest. */
			strcpy(observed.lines[nlines++], "<none>");
		}
	}

out:
	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}
128 | ||
129 | /* Check if a report related to the test exists. */ | |
130 | __no_kcsan | |
131 | static bool report_available(void) | |
132 | { | |
133 | return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines); | |
134 | } | |
135 | ||
/* Report information we expect in a report. */
struct expect_report {
	/* Access information of both accesses; access[1].fn == NULL denotes a race of unknown origin. */
	struct {
		void *fn; /* Function pointer to expected function of top frame. */
		void *addr; /* Address of access; unchecked if NULL. */
		size_t size; /* Size of access; unchecked if @addr is NULL. */
		int type; /* Access type, see KCSAN_ACCESS definitions. */
	} access[2];
};
146 | ||
/* Check observed report matches information in @r. */
__no_kcsan
static bool report_matches(const struct expect_report *r)
{
	const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;
	int i;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
			 is_assert ? "assert: race" : "data-race");
	if (r->access[1].fn) {
		char tmp[2][64];
		int cmp;

		/* Expect lexicographically sorted function names in title. */
		scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn);
		scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn);
		cmp = strcmp(tmp[0], tmp[1]);
		cur += scnprintf(cur, end - cur, "%ps / %ps",
				 cmp < 0 ? r->access[0].fn : r->access[1].fn,
				 cmp < 0 ? r->access[1].fn : r->access[0].fn);
	} else {
		scnprintf(cur, end - cur, "%pS", r->access[0].fn);
		/* The exact offset won't match, remove it. */
		cur = strchr(expect[0], '+');
		if (cur)
			*cur = '\0';
	}

	/* Access 1 */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];
	if (!r->access[1].fn)
		cur += scnprintf(cur, end - cur, "race at unknown origin, with ");

	/* Access 1 & 2 */
	for (i = 0; i < 2; ++i) {
		const int ty = r->access[i].type;
		/* Expected access-type string, derived from the KCSAN_ACCESS_* bits. */
		const char *const access_type =
			(ty & KCSAN_ACCESS_ASSERT) ?
				      ((ty & KCSAN_ACCESS_WRITE) ?
					       "assert no accesses" :
					       "assert no writes") :
				      ((ty & KCSAN_ACCESS_WRITE) ?
					       ((ty & KCSAN_ACCESS_COMPOUND) ?
							"read-write" :
							"write") :
					       "read");
		const char *const access_type_aux =
			(ty & KCSAN_ACCESS_ATOMIC) ?
				      " (marked)" :
				      ((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" : "");

		if (i == 1) {
			/* Access 2 */
			cur = expect[2];
			end = &expect[2][sizeof(expect[2]) - 1];

			if (!r->access[1].fn) {
				/* Dummy string if no second access is available. */
				strcpy(cur, "<none>");
				break;
			}
		}

		cur += scnprintf(cur, end - cur, "%s%s to ", access_type,
				 access_type_aux);

		if (r->access[i].addr) /* Address is optional. */
			cur += scnprintf(cur, end - cur, "0x%px of %zu bytes",
					 r->access[i].addr, r->access[i].size);
	}

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) &&
	      /* Access info may appear in any order. */
	      ((strstr(observed.lines[1], expect[1]) &&
		strstr(observed.lines[2], expect[2])) ||
	       (strstr(observed.lines[1], expect[2]) &&
		strstr(observed.lines[2], expect[1])));
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}
248 | ||
/* ===== Test kernels ===== */

static long test_sink;
static long test_var;
/* @test_array should be large enough to fall into multiple watchpoint slots. */
static long test_array[3 * PAGE_SIZE / sizeof(long)];
static struct {
	long val[8];
} test_struct;
static DEFINE_SEQLOCK(test_seqlock);

/*
 * Helper to avoid compiler optimizing out reads, and to generate source values
 * for writes.
 */
__no_kcsan
static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); }
266 | ||
/* Plain (unmarked) read of test_var. */
static noinline void test_kernel_read(void) { sink_value(test_var); }

/* Plain write with value change (reads test_sink unchecked to vary the value). */
static noinline void test_kernel_write(void)
{
	test_var = READ_ONCE_NOCHECK(test_sink) + 1;
}

/* Plain write that never changes the value (exercises value-change filter). */
static noinline void test_kernel_write_nochange(void) { test_var = 42; }

/* Suffixed by value-change exception filter. */
static noinline void test_kernel_write_nochange_rcu(void) { test_var = 42; }

/* Marked (atomic) read. */
static noinline void test_kernel_read_atomic(void)
{
	sink_value(READ_ONCE(test_var));
}

/* Marked (atomic) write. */
static noinline void test_kernel_write_atomic(void)
{
	WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
}
288 | ||
/* Atomic read-modify-write via compiler builtin. */
static noinline void test_kernel_atomic_rmw(void)
{
	/* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. */
	__atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
}
294 | ||
/* Uninstrumented write: races with it are reported as "unknown origin". */
__no_kcsan
static noinline void test_kernel_write_uninstrumented(void) { test_var++; }

/* Access wrapped in data_race(): must never be reported. */
static noinline void test_kernel_data_race(void) { data_race(test_var++); }

/* Assert no concurrent writers to test_var. */
static noinline void test_kernel_assert_writer(void)
{
	ASSERT_EXCLUSIVE_WRITER(test_var);
}

/* Assert no concurrent accesses (reads or writes) to test_var. */
static noinline void test_kernel_assert_access(void)
{
	ASSERT_EXCLUSIVE_ACCESS(test_var);
}
309 | ||
/* Bits toggled by test_kernel_change_bits(); used by the ASSERT_EXCLUSIVE_BITS tests. */
#define TEST_CHANGE_BITS 0xff00ff00

static noinline void test_kernel_change_bits(void)
{
	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		/*
		 * Avoid race of unknown origin for this test, just pretend they
		 * are atomic.
		 */
		kcsan_nestable_atomic_begin();
		test_var ^= TEST_CHANGE_BITS;
		kcsan_nestable_atomic_end();
	} else
		WRITE_ONCE(test_var, READ_ONCE(test_var) ^ TEST_CHANGE_BITS);
}

/* Assert the changed bits are exclusively owned: expect a report. */
static noinline void test_kernel_assert_bits_change(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, TEST_CHANGE_BITS);
}

/* Assert only the unchanged bits are exclusively owned: expect no report. */
static noinline void test_kernel_assert_bits_nochange(void)
{
	ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
}
335 | ||
/* To check that scoped assertions do trigger anywhere in scope. */
static noinline void test_enter_scope(void)
{
	int x = 0;

	/* Unrelated accesses to scoped assert. */
	READ_ONCE(test_sink);
	kcsan_check_read(&x, sizeof(x));
}

/* Scoped exclusive-writer assertion, active throughout the function. */
static noinline void test_kernel_assert_writer_scoped(void)
{
	ASSERT_EXCLUSIVE_WRITER_SCOPED(test_var);
	test_enter_scope();
}

/* Scoped exclusive-access assertion, active throughout the function. */
static noinline void test_kernel_assert_access_scoped(void)
{
	ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_var);
	test_enter_scope();
}
357 | ||
/* Read-modify-write every element of test_array (many racing addresses). */
static noinline void test_kernel_rmw_array(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(test_array); ++i)
		test_array[i]++;
}

/* Checked write covering the whole struct (larger than word size). */
static noinline void test_kernel_write_struct(void)
{
	kcsan_check_write(&test_struct, sizeof(test_struct));
	kcsan_disable_current();
	test_struct.val[3]++; /* induce value change */
	kcsan_enable_current();
}

/* Plain write to a single struct member. */
static noinline void test_kernel_write_struct_part(void)
{
	test_struct.val[3] = 42;
}

/* Zero-sized checked read: must never be reported. */
static noinline void test_kernel_read_struct_zero_size(void)
{
	kcsan_check_read(&test_struct.val[3], 0);
}
383 | ||
/* Read of the volatile-declared jiffies counter. */
static noinline void test_kernel_jiffies_reader(void)
{
	sink_value((long)jiffies);
}
388 | ||
/* Seqlock read-side critical section around a plain read of test_var. */
static noinline void test_kernel_seqlock_reader(void)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&test_seqlock);
		sink_value(test_var);
	} while (read_seqretry(&test_seqlock, seq));
}

/* Seqlock write-side critical section around a plain write of test_var. */
static noinline void test_kernel_seqlock_writer(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&test_seqlock, flags);
	test_var++;
	write_sequnlock_irqrestore(&test_seqlock, flags);
}
407 | ||
static noinline void test_kernel_atomic_builtins(void)
{
	/*
	 * Generate concurrent accesses, expecting no reports, ensuring KCSAN
	 * treats builtin atomics as actually atomic.
	 */
	__atomic_load_n(&test_var, __ATOMIC_RELAXED);
}
416 | ||
/* Toggle a single bit of test_var inside a nestable-atomic region. */
static noinline void test_kernel_xor_1bit(void)
{
	/* Do not report data races between the read-writes. */
	kcsan_nestable_atomic_begin();
	test_var ^= 0x10000;
	kcsan_nestable_atomic_end();
}
424 | ||
1fe84fd4 ME |
425 | /* ===== Test cases ===== */ |
426 | ||
427 | /* Simple test with normal data race. */ | |
428 | __no_kcsan | |
429 | static void test_basic(struct kunit *test) | |
430 | { | |
431 | const struct expect_report expect = { | |
432 | .access = { | |
433 | { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE }, | |
434 | { test_kernel_read, &test_var, sizeof(test_var), 0 }, | |
435 | }, | |
436 | }; | |
437 | static const struct expect_report never = { | |
438 | .access = { | |
439 | { test_kernel_read, &test_var, sizeof(test_var), 0 }, | |
440 | { test_kernel_read, &test_var, sizeof(test_var), 0 }, | |
441 | }, | |
442 | }; | |
443 | bool match_expect = false; | |
444 | bool match_never = false; | |
445 | ||
446 | begin_test_checks(test_kernel_write, test_kernel_read); | |
447 | do { | |
448 | match_expect |= report_matches(&expect); | |
449 | match_never = report_matches(&never); | |
450 | } while (!end_test_checks(match_never)); | |
451 | KUNIT_EXPECT_TRUE(test, match_expect); | |
452 | KUNIT_EXPECT_FALSE(test, match_never); | |
453 | } | |
454 | ||
/*
 * Stress KCSAN with lots of concurrent races on different addresses until
 * timeout.
 */
__no_kcsan
static void test_concurrent_races(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			/* NULL will match any address. */
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
			{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
		},
	};
	/* Access pair with neither side a write must never be reported. */
	static const struct expect_report never = {
		.access = {
			{ test_kernel_rmw_array, NULL, 0, 0 },
			{ test_kernel_rmw_array, NULL, 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_rmw_array, test_kernel_rmw_array);
	do {
		match_expect |= report_matches(&expect);
		match_never |= report_matches(&never);
	} while (!end_test_checks(false));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check matches exist. */
	KUNIT_EXPECT_FALSE(test, match_never);
}
486 | ||
/* Test the KCSAN_REPORT_VALUE_CHANGE_ONLY option. */
__no_kcsan
static void test_novalue_change(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_nochange, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	/* With value-change filtering, writes that do not change the value are not reported. */
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}
508 | ||
/*
 * Test that the rules where the KCSAN_REPORT_VALUE_CHANGE_ONLY option should
 * never apply work. The "_rcu" function-name suffix matches the value-change
 * exception filter.
 */
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
530 | ||
/* Test that data races of unknown origin are reported. */
__no_kcsan
static void test_unknown_origin(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ NULL }, /* No second access: unknown origin. */
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_uninstrumented, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
		KUNIT_EXPECT_TRUE(test, match_expect);
	else
		KUNIT_EXPECT_FALSE(test, match_expect);
}
552 | ||
/* Test KCSAN_ASSUME_PLAIN_WRITES_ATOMIC if it is selected. */
__no_kcsan
static void test_write_write_assume_atomic(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write, test_kernel_write);
	do {
		sink_value(READ_ONCE(test_var)); /* induce value-change */
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	/* Aligned word-sized plain writes are assumed atomic if the option is on. */
	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC))
		KUNIT_EXPECT_FALSE(test, match_expect);
	else
		KUNIT_EXPECT_TRUE(test, match_expect);
}
575 | ||
/*
 * Test that data races with writes larger than word-size are always reported,
 * even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
597 | ||
/*
 * Test that data races where only one write is larger than word-size are always
 * reported, even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct_part(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_write_struct_part);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
619 | ||
620 | /* Test that races with atomic accesses never result in reports. */ | |
621 | __no_kcsan | |
622 | static void test_read_atomic_write_atomic(struct kunit *test) | |
623 | { | |
624 | bool match_never = false; | |
625 | ||
626 | begin_test_checks(test_kernel_read_atomic, test_kernel_write_atomic); | |
627 | do { | |
628 | match_never = report_available(); | |
629 | } while (!end_test_checks(match_never)); | |
630 | KUNIT_EXPECT_FALSE(test, match_never); | |
631 | } | |
632 | ||
/* Test that a race with an atomic and plain access result in reports. */
__no_kcsan
static void test_read_plain_atomic_write(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	/* Not applicable if marked accesses are ignored entirely. */
	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
		return;

	begin_test_checks(test_kernel_read, test_kernel_write_atomic);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
654 | ||
/* Test that atomic RMWs generate correct report. */
__no_kcsan
static void test_read_plain_atomic_rmw(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
			{ test_kernel_atomic_rmw, &test_var, sizeof(test_var),
				KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
		},
	};
	bool match_expect = false;

	/* Not applicable if marked accesses are ignored entirely. */
	if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
		return;

	begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
677 | ||
/* Zero-sized accesses should never cause data race reports. */
__no_kcsan
static void test_zero_size_access(struct kunit *test)
{
	/* Sanity-check report: write/write on the struct still races. */
	const struct expect_report expect = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
		},
	};
	/* The zero-size read must never appear in a report. */
	const struct expect_report never = {
		.access = {
			{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
			{ test_kernel_read_struct_zero_size, &test_struct.val[3], 0, 0 },
		},
	};
	bool match_expect = false;
	bool match_never = false;

	begin_test_checks(test_kernel_write_struct, test_kernel_read_struct_zero_size);
	do {
		match_expect |= report_matches(&expect);
		match_never = report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check. */
	KUNIT_EXPECT_FALSE(test, match_never);
}
705 | ||
706 | /* Test the data_race() macro. */ | |
707 | __no_kcsan | |
708 | static void test_data_race(struct kunit *test) | |
709 | { | |
710 | bool match_never = false; | |
711 | ||
712 | begin_test_checks(test_kernel_data_race, test_kernel_data_race); | |
713 | do { | |
714 | match_never = report_available(); | |
715 | } while (!end_test_checks(match_never)); | |
716 | KUNIT_EXPECT_FALSE(test, match_never); | |
717 | } | |
718 | ||
/* A concurrent plain write must trigger ASSERT_EXCLUSIVE_WRITER. */
__no_kcsan
static void test_assert_exclusive_writer(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_writer, test_kernel_write_nochange);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
736 | ||
/* A concurrent plain read must trigger ASSERT_EXCLUSIVE_ACCESS. */
__no_kcsan
static void test_assert_exclusive_access(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_read);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
754 | ||
/* Combinations of ASSERT_EXCLUSIVE_ACCESS and ASSERT_EXCLUSIVE_WRITER. */
__no_kcsan
static void test_assert_exclusive_access_writer(struct kunit *test)
{
	/* access vs. writer assertion: must be reported. */
	const struct expect_report expect_access_writer = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	/* access vs. access assertion: must be reported. */
	const struct expect_report expect_access_access = {
		.access = {
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
			{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
		},
	};
	/* writer vs. writer assertion (neither writes): must never be reported. */
	const struct expect_report never = {
		.access = {
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
		},
	};
	bool match_expect_access_writer = false;
	bool match_expect_access_access = false;
	bool match_never = false;

	begin_test_checks(test_kernel_assert_access, test_kernel_assert_writer);
	do {
		match_expect_access_writer |= report_matches(&expect_access_writer);
		match_expect_access_access |= report_matches(&expect_access_access);
		match_never |= report_matches(&never);
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_TRUE(test, match_expect_access_writer);
	KUNIT_EXPECT_TRUE(test, match_expect_access_access);
	KUNIT_EXPECT_FALSE(test, match_never);
}
790 | ||
/* Changing asserted-exclusive bits must trigger ASSERT_EXCLUSIVE_BITS. */
__no_kcsan
static void test_assert_exclusive_bits_change(struct kunit *test)
{
	const struct expect_report expect = {
		.access = {
			{ test_kernel_assert_bits_change, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
			{ test_kernel_change_bits, &test_var, sizeof(test_var),
				KCSAN_ACCESS_WRITE | (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) ? 0 : KCSAN_ACCESS_ATOMIC) },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_assert_bits_change, test_kernel_change_bits);
	do {
		match_expect = report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}
809 | ||
810 | __no_kcsan | |
811 | static void test_assert_exclusive_bits_nochange(struct kunit *test) | |
812 | { | |
813 | bool match_never = false; | |
814 | ||
815 | begin_test_checks(test_kernel_assert_bits_nochange, test_kernel_change_bits); | |
816 | do { | |
817 | match_never = report_available(); | |
818 | } while (!end_test_checks(match_never)); | |
819 | KUNIT_EXPECT_FALSE(test, match_never); | |
820 | } | |
821 | ||
/* Scoped writer assertion must report both at scope start and within scope. */
__no_kcsan
static void test_assert_exclusive_writer_scoped(struct kunit *test)
{
	const struct expect_report expect_start = {
		.access = {
			{ test_kernel_assert_writer_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	/* Report may also be attributed to the callee still within scope. */
	const struct expect_report expect_anywhere = {
		.access = {
			{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
			{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
		},
	};
	bool match_expect_start = false;
	bool match_expect_anywhere = false;

	begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
	do {
		match_expect_start |= report_matches(&expect_start);
		match_expect_anywhere |= report_matches(&expect_anywhere);
	} while (!end_test_checks(match_expect_start && match_expect_anywhere));
	KUNIT_EXPECT_TRUE(test, match_expect_start);
	KUNIT_EXPECT_TRUE(test, match_expect_anywhere);
}
848 | ||
849 | __no_kcsan | |
850 | static void test_assert_exclusive_access_scoped(struct kunit *test) | |
851 | { | |
852 | const struct expect_report expect_start1 = { | |
853 | .access = { | |
854 | { test_kernel_assert_access_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED }, | |
855 | { test_kernel_read, &test_var, sizeof(test_var), 0 }, | |
856 | }, | |
857 | }; | |
858 | const struct expect_report expect_start2 = { | |
859 | .access = { expect_start1.access[0], expect_start1.access[0] }, | |
860 | }; | |
861 | const struct expect_report expect_inscope = { | |
862 | .access = { | |
863 | { test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED }, | |
864 | { test_kernel_read, &test_var, sizeof(test_var), 0 }, | |
865 | }, | |
866 | }; | |
867 | bool match_expect_start = false; | |
868 | bool match_expect_inscope = false; | |
869 | ||
870 | begin_test_checks(test_kernel_assert_access_scoped, test_kernel_read); | |
871 | end_time += msecs_to_jiffies(1000); /* This test requires a bit more time. */ | |
872 | do { | |
873 | match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2); | |
874 | match_expect_inscope |= report_matches(&expect_inscope); | |
875 | } while (!end_test_checks(match_expect_start && match_expect_inscope)); | |
876 | KUNIT_EXPECT_TRUE(test, match_expect_start); | |
877 | KUNIT_EXPECT_TRUE(test, match_expect_inscope); | |
878 | } | |
879 | ||
56b031f0 ME |
/*
 * jiffies is special (declared to be volatile) and its accesses are typically
 * not marked; this test ensures that neither the compiler nor KCSAN gets
 * confused about jiffies's declaration on different architectures.
 */
__no_kcsan
static void test_jiffies_noreport(struct kunit *test)
{
	bool match_never = false;

	/* Both threads run the same plain jiffies reader; expect no report. */
	begin_test_checks(test_kernel_jiffies_reader, test_kernel_jiffies_reader);
	do {
		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
896 | ||
1fe84fd4 ME |
897 | /* Test that racing accesses in seqlock critical sections are not reported. */ |
898 | __no_kcsan | |
899 | static void test_seqlock_noreport(struct kunit *test) | |
900 | { | |
901 | bool match_never = false; | |
902 | ||
903 | begin_test_checks(test_kernel_seqlock_reader, test_kernel_seqlock_writer); | |
904 | do { | |
905 | match_never = report_available(); | |
906 | } while (!end_test_checks(match_never)); | |
907 | KUNIT_EXPECT_FALSE(test, match_never); | |
908 | } | |
909 | ||
f9ea6319 ME |
/*
 * Test atomic builtins work and required instrumentation functions exist. We
 * also test that KCSAN understands they're atomic by racing with them via
 * test_kernel_atomic_builtins(), and expect no reports.
 *
 * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
 */
static void test_atomic_builtins(struct kunit *test)
{
	bool match_never = false;

	begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
	do {
		long tmp;

		/* Enable KCSAN for the current thread while doing the atomic ops. */
		kcsan_enable_current();

		__atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
		KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));

		KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 20L, test_var);

		/* Successful CAS: test_var == tmp (20), so test_var becomes 30. */
		tmp = 20L;
		KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
								    0, __ATOMIC_RELAXED,
								    __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 20L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);
		/* Failed CAS: tmp (20) != test_var (30); tmp is updated to 30. */
		KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
								     1, __ATOMIC_RELAXED,
								     __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, tmp, 30L);
		KUNIT_EXPECT_EQ(test, test_var, 30L);

		/* Each fetch-op returns the previous value; chain starts at 30. */
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
		KUNIT_EXPECT_EQ(test, -2L, test_var); /* ~(241 & 0xf) == -2 */

		/* Fences only need to link (instrumentation exists); no checkable result. */
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
		__atomic_signal_fence(__ATOMIC_SEQ_CST);

		kcsan_disable_current();

		match_never = report_available();
	} while (!end_test_checks(match_never));
	KUNIT_EXPECT_FALSE(test, match_never);
}
962 | ||
d8fd74d3 ME |
963 | __no_kcsan |
964 | static void test_1bit_value_change(struct kunit *test) | |
965 | { | |
966 | const struct expect_report expect = { | |
967 | .access = { | |
968 | { test_kernel_read, &test_var, sizeof(test_var), 0 }, | |
969 | { test_kernel_xor_1bit, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) }, | |
970 | }, | |
971 | }; | |
972 | bool match = false; | |
973 | ||
974 | begin_test_checks(test_kernel_read, test_kernel_xor_1bit); | |
975 | do { | |
976 | match = IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) | |
977 | ? report_available() | |
978 | : report_matches(&expect); | |
979 | } while (!end_test_checks(match)); | |
980 | if (IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)) | |
981 | KUNIT_EXPECT_FALSE(test, match); | |
982 | else | |
983 | KUNIT_EXPECT_TRUE(test, match); | |
984 | } | |
985 | ||
1fe84fd4 | 986 | /* |
f6a14914 ME |
987 | * Generate thread counts for all test cases. Values generated are in interval |
988 | * [2, 5] followed by exponentially increasing thread counts from 8 to 32. | |
1fe84fd4 ME |
989 | * |
990 | * The thread counts are chosen to cover potentially interesting boundaries and | |
f6a14914 | 991 | * corner cases (2 to 5), and then stress the system with larger counts. |
1fe84fd4 | 992 | */ |
f6a14914 ME |
993 | static const void *nthreads_gen_params(const void *prev, char *desc) |
994 | { | |
995 | long nthreads = (long)prev; | |
996 | ||
997 | if (nthreads < 0 || nthreads >= 32) | |
998 | nthreads = 0; /* stop */ | |
999 | else if (!nthreads) | |
1000 | nthreads = 2; /* initial value */ | |
1001 | else if (nthreads < 5) | |
1002 | nthreads++; | |
1003 | else if (nthreads == 5) | |
1004 | nthreads = 8; | |
1005 | else | |
1006 | nthreads *= 2; | |
1fe84fd4 | 1007 | |
f6a14914 ME |
1008 | if (!IS_ENABLED(CONFIG_PREEMPT) || !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) { |
1009 | /* | |
1010 | * Without any preemption, keep 2 CPUs free for other tasks, one | |
1011 | * of which is the main test case function checking for | |
1012 | * completion or failure. | |
1013 | */ | |
1014 | const long min_unused_cpus = IS_ENABLED(CONFIG_PREEMPT_NONE) ? 2 : 0; | |
1015 | const long min_required_cpus = 2 + min_unused_cpus; | |
1016 | ||
1017 | if (num_online_cpus() < min_required_cpus) { | |
f4abe996 | 1018 | pr_err_once("Too few online CPUs (%u < %ld) for test\n", |
f6a14914 ME |
1019 | num_online_cpus(), min_required_cpus); |
1020 | nthreads = 0; | |
1021 | } else if (nthreads >= num_online_cpus() - min_unused_cpus) { | |
1022 | /* Use negative value to indicate last param. */ | |
1023 | nthreads = -(num_online_cpus() - min_unused_cpus); | |
1024 | pr_warn_once("Limiting number of threads to %ld (only %d online CPUs)\n", | |
1025 | -nthreads, num_online_cpus()); | |
1026 | } | |
1027 | } | |
1028 | ||
1029 | snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", abs(nthreads)); | |
1030 | return (void *)nthreads; | |
1031 | } | |
1032 | ||
/* Every test case is parameterized by thread count via nthreads_gen_params(). */
#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)

/* Table of all test cases; terminated by an empty entry. */
static struct kunit_case kcsan_test_cases[] = {
	KCSAN_KUNIT_CASE(test_basic),
	KCSAN_KUNIT_CASE(test_concurrent_races),
	KCSAN_KUNIT_CASE(test_novalue_change),
	KCSAN_KUNIT_CASE(test_novalue_change_exception),
	KCSAN_KUNIT_CASE(test_unknown_origin),
	KCSAN_KUNIT_CASE(test_write_write_assume_atomic),
	KCSAN_KUNIT_CASE(test_write_write_struct),
	KCSAN_KUNIT_CASE(test_write_write_struct_part),
	KCSAN_KUNIT_CASE(test_read_atomic_write_atomic),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_write),
	KCSAN_KUNIT_CASE(test_read_plain_atomic_rmw),
	KCSAN_KUNIT_CASE(test_zero_size_access),
	KCSAN_KUNIT_CASE(test_data_race),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_writer),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_change),
	KCSAN_KUNIT_CASE(test_assert_exclusive_bits_nochange),
	KCSAN_KUNIT_CASE(test_assert_exclusive_writer_scoped),
	KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
	KCSAN_KUNIT_CASE(test_jiffies_noreport),
	KCSAN_KUNIT_CASE(test_seqlock_noreport),
	KCSAN_KUNIT_CASE(test_atomic_builtins),
	KCSAN_KUNIT_CASE(test_1bit_value_change),
	{},
};
1061 | ||
1062 | /* ===== End test cases ===== */ | |
1063 | ||
1fe84fd4 ME |
1064 | /* Concurrent accesses from interrupts. */ |
1065 | __no_kcsan | |
1066 | static void access_thread_timer(struct timer_list *timer) | |
1067 | { | |
1068 | static atomic_t cnt = ATOMIC_INIT(0); | |
1069 | unsigned int idx; | |
1070 | void (*func)(void); | |
1071 | ||
1072 | idx = (unsigned int)atomic_inc_return(&cnt) % ARRAY_SIZE(access_kernels); | |
1073 | /* Acquire potential initialization. */ | |
1074 | func = smp_load_acquire(&access_kernels[idx]); | |
1075 | if (func) | |
1076 | func(); | |
1077 | } | |
1078 | ||
1079 | /* The main loop for each thread. */ | |
1080 | __no_kcsan | |
1081 | static int access_thread(void *arg) | |
1082 | { | |
1083 | struct timer_list timer; | |
1084 | unsigned int cnt = 0; | |
1085 | unsigned int idx; | |
1086 | void (*func)(void); | |
1087 | ||
1088 | timer_setup_on_stack(&timer, access_thread_timer, 0); | |
1089 | do { | |
1090 | might_sleep(); | |
1091 | ||
1092 | if (!timer_pending(&timer)) | |
1093 | mod_timer(&timer, jiffies + 1); | |
1094 | else { | |
1095 | /* Iterate through all kernels. */ | |
1096 | idx = cnt++ % ARRAY_SIZE(access_kernels); | |
1097 | /* Acquire potential initialization. */ | |
1098 | func = smp_load_acquire(&access_kernels[idx]); | |
1099 | if (func) | |
1100 | func(); | |
1101 | } | |
1102 | } while (!torture_must_stop()); | |
1103 | del_timer_sync(&timer); | |
1104 | destroy_timer_on_stack(&timer); | |
1105 | ||
1106 | torture_kthread_stopping("access_thread"); | |
1107 | return 0; | |
1108 | } | |
1109 | ||
/*
 * Per-test-case setup: reset captured console output, then create the
 * parameterized number of access threads via the torture framework.
 * Returns 0 on success, -EBUSY if torture init is busy, -EINVAL otherwise.
 */
__no_kcsan
static int test_init(struct kunit *test)
{
	unsigned long flags;
	int nthreads;
	int i;

	/* Discard any console lines observed by a previous test case. */
	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); ++i)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	if (!torture_init_begin((char *)test->name, 1))
		return -EBUSY;

	/* The previous test case must have cleaned up after itself. */
	if (WARN_ON(threads))
		goto err;

	for (i = 0; i < ARRAY_SIZE(access_kernels); ++i) {
		if (WARN_ON(access_kernels[i]))
			goto err;
	}

	/* param_value is +/-nthreads; negative marks the last parameter. */
	nthreads = abs((long)test->param_value);
	if (WARN_ON(!nthreads))
		goto err;

	/* One extra slot so the list stays NULL-terminated for test_exit(). */
	threads = kcalloc(nthreads + 1, sizeof(struct task_struct *), GFP_KERNEL);
	if (WARN_ON(!threads))
		goto err;

	threads[nthreads] = NULL;
	for (i = 0; i < nthreads; ++i) {
		if (torture_create_kthread(access_thread, NULL, threads[i]))
			goto err;
	}

	torture_init_end();

	return 0;

err:
	/* torture_init_begin() succeeded, so torture_init_end() must be called. */
	kfree(threads);
	threads = NULL;
	torture_init_end();
	return -EINVAL;
}
1158 | ||
1159 | __no_kcsan | |
1160 | static void test_exit(struct kunit *test) | |
1161 | { | |
1162 | struct task_struct **stop_thread; | |
1163 | int i; | |
1164 | ||
1165 | if (torture_cleanup_begin()) | |
1166 | return; | |
1167 | ||
1168 | for (i = 0; i < ARRAY_SIZE(access_kernels); ++i) | |
1169 | WRITE_ONCE(access_kernels[i], NULL); | |
1170 | ||
1171 | if (threads) { | |
1172 | for (stop_thread = threads; *stop_thread; stop_thread++) | |
1173 | torture_stop_kthread(reader_thread, *stop_thread); | |
1174 | ||
1175 | kfree(threads); | |
1176 | threads = NULL; | |
1177 | } | |
1178 | ||
1179 | torture_cleanup_end(); | |
1180 | } | |
1181 | ||
/* KUnit suite definition; init/exit run around every (parameterized) case. */
static struct kunit_suite kcsan_test_suite = {
	.name = "kcsan",
	.test_cases = kcsan_test_cases,
	.init = test_init,
	.exit = test_exit,
};
/* NULL-terminated list as required by __kunit_test_suites_init/exit(). */
static struct kunit_suite *kcsan_test_suites[] = { &kcsan_test_suite, NULL };
1189 | ||
1190 | __no_kcsan | |
1191 | static void register_tracepoints(struct tracepoint *tp, void *ignore) | |
1192 | { | |
1193 | check_trace_callback_type_console(probe_console); | |
1194 | if (!strcmp(tp->name, "console")) | |
1195 | WARN_ON(tracepoint_probe_register(tp, probe_console, NULL)); | |
1196 | } | |
1197 | ||
1198 | __no_kcsan | |
1199 | static void unregister_tracepoints(struct tracepoint *tp, void *ignore) | |
1200 | { | |
1201 | if (!strcmp(tp->name, "console")) | |
1202 | tracepoint_probe_unregister(tp, probe_console, NULL); | |
1203 | } | |
1204 | ||
/*
 * We only want to do tracepoints setup and teardown once, therefore we have to
 * customize the init and exit functions and cannot rely on kunit_test_suite().
 */
static int __init kcsan_test_init(void)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	/* Run the suites only after the console probe is in place. */
	return __kunit_test_suites_init(kcsan_test_suites);
}
1219 | ||
/* Module teardown: suites exit first, then the console probe is removed. */
static void kcsan_test_exit(void)
{
	__kunit_test_suites_exit(kcsan_test_suites);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	/* Wait for in-flight probes before the module text can go away. */
	tracepoint_synchronize_unregister();
}
1226 | ||
/* Register late; kcsan_test_init() also hooks the console tracepoint. */
late_initcall(kcsan_test_init);
module_exit(kcsan_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Marco Elver <elver@google.com>");