// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN reporting.
 *
 * Copyright (C) 2019, Google LLC.
 */

#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>

#include "kcsan.h"
#include "encoding.h"
/*
 * Max. number of stack entries to show in the report.
 */
#define NUM_STACK_ENTRIES 64

/* Common access info. */
struct access_info {
        const volatile void     *ptr;
        size_t                  size;
        int                     access_type;
        int                     task_pid;
        int                     cpu_id;
};
/*
 * Other thread info: communicated from the other racing thread to the thread
 * that set up the watchpoint, which then prints the complete report
 * atomically.
 */
struct other_info {
        struct access_info      ai;
        unsigned long           stack_entries[NUM_STACK_ENTRIES];
        int                     num_stack_entries;

        /*
         * Optionally pass @current. Typically we do not need to pass @current
         * via @other_info since just @task_pid is sufficient. Passing @current
         * has additional overhead.
         *
         * To safely pass @current, we must either use get_task_struct/
         * put_task_struct, or stall the thread that populated @other_info.
         *
         * We cannot rely on get_task_struct/put_task_struct in case
         * release_report() races with the task being released, in which case
         * we would have to free it in release_report(). This may result in
         * deadlock if we want to use KCSAN on the allocators.
         *
         * Since we also want to reliably print held locks for
         * CONFIG_KCSAN_VERBOSE, the current implementation stalls the thread
         * that populated @other_info until it has been consumed.
         */
        struct task_struct      *task;
};
/*
 * To never block any producers of struct other_info, we need as many elements
 * as we have watchpoints (upper bound on concurrent races to report).
 */
static struct other_info other_infos[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Information about reported races; used to rate limit reporting.
 */
struct report_time {
        /*
         * The last time the race was reported.
         */
        unsigned long time;

        /*
         * The frames of the 2 threads; if only 1 thread is known, one frame
         * will be 0.
         */
        unsigned long frame1;
        unsigned long frame2;
};

/*
 * Since we also want to be able to debug allocators with KCSAN, to avoid
 * deadlock, report_times cannot be dynamically resized with krealloc in
 * rate_limit_report().
 *
 * Therefore, we use a fixed-size array, which at most will occupy a page. This
 * still adequately rate limits reports, assuming that a) the number of unique
 * data races is not excessive, and b) the occurrence of unique races within
 * the same time window is limited.
 */
#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
#define REPORT_TIMES_SIZE                                                      \
        (CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ?                   \
                 REPORT_TIMES_MAX :                                            \
                 CONFIG_KCSAN_REPORT_ONCE_IN_MS)
static struct report_time report_times[REPORT_TIMES_SIZE];
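/*
 * Illustrative sizing example (exact numbers depend on configuration): on a
 * 64-bit kernel with 4 KiB pages, sizeof(struct report_time) == 24, so
 * REPORT_TIMES_MAX == 170; any CONFIG_KCSAN_REPORT_ONCE_IN_MS larger than that
 * caps REPORT_TIMES_SIZE at 170 entries, i.e. at most one page.
 */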
/*
 * Spinlock serializing report generation, and access to @other_infos. Although
 * it could make sense to have a finer-grained locking story for @other_infos,
 * report generation needs to be serialized either way, so not much is gained.
 */
static DEFINE_RAW_SPINLOCK(report_lock);
/*
 * Checks if the race identified by thread frames frame1 and frame2 has
 * been reported since (now - KCSAN_REPORT_ONCE_IN_MS).
 */
static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
{
        struct report_time *use_entry = &report_times[0];
        unsigned long invalid_before;
        int i;

        BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0);

        if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0)
                return false;

        invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);

        /* Check if a matching race report exists. */
        for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
                struct report_time *rt = &report_times[i];

                /*
                 * Must always select an entry for use to store info, as we
                 * cannot resize report_times; at the end of the scan, use_entry
                 * will be the oldest entry, which ideally was last used more
                 * than KCSAN_REPORT_ONCE_IN_MS ago.
                 */
                if (time_before(rt->time, use_entry->time))
                        use_entry = rt;

                /*
                 * Initially, no need to check any further, as neither this
                 * entry nor the following entries have ever been used.
                 */
                if (rt->time == 0)
                        break;

                /* Check if entry expired. */
                if (time_before(rt->time, invalid_before))
                        continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */

                /* Reported recently, check if race matches. */
                if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
                    (rt->frame1 == frame2 && rt->frame2 == frame1))
                        return true;
        }

        use_entry->time = jiffies;
        use_entry->frame1 = frame1;
        use_entry->frame2 = frame2;
        return false;
}
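/*
 * Illustrative example: with CONFIG_KCSAN_REPORT_ONCE_IN_MS=3000, a race
 * between the same pair of frames is reported at most once every 3 seconds;
 * a race not seen recently always claims the oldest report_times entry
 * (use_entry) selected above.
 */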
/*
 * Special rules to skip reporting.
 */
static bool
skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
{
        /* Should never get here if value_change==FALSE. */
        WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);

        /*
         * The first call to skip_report always has value_change==TRUE, since
         * we cannot know the value written by an instrumented access. For the
         * 2nd call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY:
         *
         * 1. read watchpoint, conflicting write (value_change==TRUE): report;
         * 2. read watchpoint, conflicting write (value_change==MAYBE): skip;
         * 3. write watchpoint, conflicting write (value_change==TRUE): report;
         * 4. write watchpoint, conflicting write (value_change==MAYBE): skip;
         * 5. write watchpoint, conflicting read (value_change==MAYBE): skip;
         * 6. write watchpoint, conflicting read (value_change==TRUE): report;
         *
         * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
         * data races where the write may have rewritten the same value; case 6
         * is possible either if the size is larger than what we check value
         * changes for, or if the access type is KCSAN_ACCESS_ASSERT.
         */
        if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
            value_change == KCSAN_VALUE_CHANGE_MAYBE) {
                /*
                 * The access is a write, but the data value did not change.
                 *
                 * We opt out of this filter for certain functions at the
                 * request of maintainers.
                 */
                char buf[64];
                int len = scnprintf(buf, sizeof(buf), "%ps", (void *)top_frame);

                if (!strnstr(buf, "rcu_", len) &&
                    !strnstr(buf, "_rcu", len) &&
                    !strnstr(buf, "_srcu", len))
                        return true;
        }

        return kcsan_skip_report_debugfs(top_frame);
}
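/*
 * Illustrative example: accesses of type KCSAN_ACCESS_ASSERT originate from
 * the ASSERT_EXCLUSIVE_*() macros in <linux/kcsan-checks.h>, e.g.:
 *
 *      ASSERT_EXCLUSIVE_WRITER(shared_var);
 *
 * Such accesses are reported even if the observed value did not change
 * (case 6 above), because the assertion itself is the property being checked.
 */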
static const char *get_access_type(int type)
{
        if (type & KCSAN_ACCESS_ASSERT) {
                if (type & KCSAN_ACCESS_SCOPED) {
                        if (type & KCSAN_ACCESS_WRITE)
                                return "assert no accesses (scoped)";
                        else
                                return "assert no writes (scoped)";
                } else {
                        if (type & KCSAN_ACCESS_WRITE)
                                return "assert no accesses";
                        else
                                return "assert no writes";
                }
        }

        switch (type) {
        case 0:
                return "read";
        case KCSAN_ACCESS_ATOMIC:
                return "read (marked)";
        case KCSAN_ACCESS_WRITE:
                return "write";
        case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
                return "write (marked)";
        case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
                return "read-write";
        case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
                return "read-write (marked)";
        case KCSAN_ACCESS_SCOPED:
                return "read (scoped)";
        case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
                return "read (marked, scoped)";
        case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE:
                return "write (scoped)";
        case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
                return "write (marked, scoped)";
        default:
                BUG();
        }
}
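/*
 * The bug type returned below becomes part of the report title printed by
 * print_report(), e.g. "BUG: KCSAN: data-race in ..." or
 * "BUG: KCSAN: assert: race in ...".
 */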
static const char *get_bug_type(int type)
{
        return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race";
}
/* Return thread description: in task or interrupt. */
static const char *get_thread_desc(int task_id)
{
        if (task_id != -1) {
                static char buf[32]; /* safe: protected by report_lock */

                snprintf(buf, sizeof(buf), "task %i", task_id);
                return buf;
        }
        return "interrupt";
}
/* Helper to skip KCSAN-related functions in stack-trace. */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
{
        char buf[64];
        char *cur;
        int len, skip;

        for (skip = 0; skip < num_entries; ++skip) {
                len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);

                /* Never show tsan_* or {read,write}_once_size. */
                if (strnstr(buf, "tsan_", len) ||
                    strnstr(buf, "_once_size", len))
                        continue;

                cur = strnstr(buf, "kcsan_", len);
                if (cur) {
                        cur += strlen("kcsan_");
                        if (!str_has_prefix(cur, "test"))
                                continue; /* KCSAN runtime function. */
                        /* KCSAN related test. */
                }

                /*
                 * No match for runtime functions -- @skip entries to skip to
                 * get to first frame of interest.
                 */
                break;
        }

        return skip;
}
/* Compares symbolized strings of addr1 and addr2. */
static int sym_strcmp(void *addr1, void *addr2)
{
        char buf1[64];
        char buf2[64];

        snprintf(buf1, sizeof(buf1), "%pS", addr1);
        snprintf(buf2, sizeof(buf2), "%pS", addr2);

        return strncmp(buf1, buf2, sizeof(buf1));
}
static void print_verbose_info(struct task_struct *task)
{
        if (!task)
                return;

        /* Restore IRQ state trace for printing. */
        kcsan_restore_irqtrace(task);

        pr_err("\n");
        debug_show_held_locks(task);
        print_irqtrace_events(task);
}
static void print_report(enum kcsan_value_change value_change,
                         const struct access_info *ai,
                         const struct other_info *other_info,
                         u64 old, u64 new, u64 mask)
{
        unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
        int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
        int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
        unsigned long this_frame = stack_entries[skipnr];
        unsigned long other_frame = 0;
        int other_skipnr = 0; /* silence uninit warnings */

        /*
         * Must check report filter rules before starting to print.
         */
        if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
                return;

        if (other_info) {
                other_skipnr = get_stack_skipnr(other_info->stack_entries,
                                                other_info->num_stack_entries);
                other_frame = other_info->stack_entries[other_skipnr];

                /* @value_change is only known for the other thread. */
                if (skip_report(value_change, other_frame))
                        return;
        }

        if (rate_limit_report(this_frame, other_frame))
                return;
        /* Print report header. */
        pr_err("==================================================================\n");
        if (other_info) {
                int cmp;

                /*
                 * Order functions lexicographically for consistent bug titles.
                 * Do not print offset of functions to keep title short.
                 */
                cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
                pr_err("BUG: KCSAN: %s in %ps / %ps\n",
                       get_bug_type(ai->access_type | other_info->ai.access_type),
                       (void *)(cmp < 0 ? other_frame : this_frame),
                       (void *)(cmp < 0 ? this_frame : other_frame));
        } else {
                pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type),
                       (void *)this_frame);
        }

        pr_err("\n");

        /* Print information about the racing accesses. */
        if (other_info) {
                pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
                       get_access_type(other_info->ai.access_type), other_info->ai.ptr,
                       other_info->ai.size, get_thread_desc(other_info->ai.task_pid),
                       other_info->ai.cpu_id);

                /* Print the other thread's stack trace. */
                stack_trace_print(other_info->stack_entries + other_skipnr,
                                  other_info->num_stack_entries - other_skipnr,
                                  0);

                if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
                        print_verbose_info(other_info->task);

                pr_err("\n");
                pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
                       get_access_type(ai->access_type), ai->ptr, ai->size,
                       get_thread_desc(ai->task_pid), ai->cpu_id);
        } else {
                pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
                       get_access_type(ai->access_type), ai->ptr, ai->size,
                       get_thread_desc(ai->task_pid), ai->cpu_id);
        }
        /* Print stack trace of this thread. */
        stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
                          0);

        if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
                print_verbose_info(current);

        /* Print observed value change. */
        if (ai->size <= 8) {
                int hex_len = ai->size * 2;
                u64 diff = old ^ new;

                if (mask)
                        diff &= mask;
                if (diff) {
                        pr_err("\n");
                        pr_err("value changed: 0x%0*llx -> 0x%0*llx\n",
                               hex_len, old, hex_len, new);
                        if (mask) {
                                pr_err(" bits changed: 0x%0*llx with mask 0x%0*llx\n",
                                       hex_len, diff, hex_len, mask);
                        }
                }
        }

        /* Print report footer. */
        pr_err("\n");
        pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
        dump_stack_print_info(KERN_DEFAULT);
        pr_err("==================================================================\n");

        if (panic_on_warn)
                panic("panic_on_warn set ...\n");
}
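/*
 * A generated report looks roughly as follows (hypothetical example; function
 * names, addresses, and values are made up):
 *
 *      ==================================================================
 *      BUG: KCSAN: data-race in foo_update / foo_read
 *
 *      write to 0xffff88810728c140 of 4 bytes by task 532 on cpu 1:
 *       foo_update+0x2c/0x50
 *       ...
 *
 *      read to 0xffff88810728c140 of 4 bytes by task 531 on cpu 0:
 *       foo_read+0x18/0x60
 *       ...
 *
 *      value changed: 0x00000001 -> 0x00000002
 *
 *      Reported by Kernel Concurrency Sanitizer on:
 *      CPU: 0 PID: 531 ...
 *      ==================================================================
 */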
static void release_report(unsigned long *flags, struct other_info *other_info)
{
        /*
         * Use size to denote valid/invalid, since KCSAN entirely ignores
         * 0-sized accesses.
         */
        other_info->ai.size = 0;
        raw_spin_unlock_irqrestore(&report_lock, *flags);
}
/*
 * Sets @other_info->task and awaits consumption of @other_info.
 *
 * Precondition: report_lock is held.
 * Postcondition: report_lock is held.
 */
static void set_other_info_task_blocking(unsigned long *flags,
                                         const struct access_info *ai,
                                         struct other_info *other_info)
{
        /*
         * We may be instrumenting a code-path where current->state is already
         * something other than TASK_RUNNING.
         */
        const bool is_running = task_is_running(current);
        /*
         * To avoid deadlock in case we are in an interrupt here and this is a
         * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a
         * timeout to ensure this works in all contexts.
         *
         * Await approximately the worst case delay of the reporting thread (if
         * we are not interrupted).
         */
        int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt);

        other_info->task = current;
        do {
                if (is_running) {
                        /*
                         * Let lockdep know the real task is sleeping, to print
                         * the held locks (recall we turned lockdep off, so
                         * locking/unlocking @report_lock won't be recorded).
                         */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                }
                raw_spin_unlock_irqrestore(&report_lock, *flags);
                /*
                 * We cannot call schedule() since we also cannot reliably
                 * determine if sleeping here is permitted -- see in_atomic().
                 */

                udelay(1);
                raw_spin_lock_irqsave(&report_lock, *flags);
                if (timeout-- < 0) {
                        /*
                         * Abort. Reset @other_info->task to NULL, since it
                         * appears the other thread is still going to consume
                         * it. This will result in no verbose info being
                         * printed for this task.
                         */
                        other_info->task = NULL;
                        break;
                }
                /*
                 * If invalid, or if neither @ptr nor @current matches, then
                 * @other_info has been consumed and we may continue. If not,
                 * retry.
                 */
        } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&
                 other_info->task == current);
        if (is_running)
                set_current_state(TASK_RUNNING);
}
/* Populate @other_info; requires that the provided @other_info is not in use. */
static void prepare_report_producer(unsigned long *flags,
                                    const struct access_info *ai,
                                    struct other_info *other_info)
{
        raw_spin_lock_irqsave(&report_lock, *flags);

        /*
         * The same @other_infos entry cannot be used concurrently, because
         * there is a one-to-one mapping to watchpoint slots (@watchpoints in
         * core.c), and a watchpoint is only released for reuse after reporting
         * is done by the consumer of @other_info. Therefore, it is impossible
         * for another concurrent prepare_report_producer() to set the same
         * @other_info, and we are guaranteed exclusivity for the @other_infos
         * entry pointed to by @other_info.
         *
         * To check that this property holds, size should never be non-zero
         * here, because every consumer of struct other_info resets size to 0
         * in release_report().
         */
        WARN_ON(other_info->ai.size);

        other_info->ai = *ai;
        other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 2);

        if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
                set_other_info_task_blocking(flags, ai, other_info);

        raw_spin_unlock_irqrestore(&report_lock, *flags);
}
/* Awaits producer to fill @other_info and then returns. */
static bool prepare_report_consumer(unsigned long *flags,
                                    const struct access_info *ai,
                                    struct other_info *other_info)
{
        raw_spin_lock_irqsave(&report_lock, *flags);
        while (!other_info->ai.size) { /* Await valid @other_info. */
                raw_spin_unlock_irqrestore(&report_lock, *flags);
                cpu_relax();
                raw_spin_lock_irqsave(&report_lock, *flags);
        }

        /* Should always have a matching access based on watchpoint encoding. */
        if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size,
                                     (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))
                goto discard;

        if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,
                             (unsigned long)ai->ptr, ai->size)) {
                /*
                 * If the actual accesses do not match, this was a false
                 * positive due to watchpoint encoding.
                 */
                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]);
                goto discard;
        }

        return true;

discard:
        release_report(flags, other_info);
        return false;
}
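/*
 * Overview of the handshake around struct other_info (illustrative):
 *
 *      racing ("other") thread                 watchpoint owner
 *      -----------------------                 ----------------
 *      kcsan_report_set_info()
 *        prepare_report_producer():
 *          fill other_info (ai.size != 0) -->  kcsan_report_known_origin()
 *          [CONFIG_KCSAN_VERBOSE: stall          prepare_report_consumer():
 *           until other_info is consumed]          spin until ai.size != 0
 *                                                print_report()
 *          ...stall ends  <-----------------    release_report(): ai.size = 0
 */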
static struct access_info prepare_access_info(const volatile void *ptr, size_t size,
                                              int access_type)
{
        return (struct access_info) {
                .ptr            = ptr,
                .size           = size,
                .access_type    = access_type,
                .task_pid       = in_task() ? task_pid_nr(current) : -1,
                .cpu_id         = raw_smp_processor_id()
        };
}
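/*
 * The functions below are the reporting entry points called by the KCSAN
 * runtime (core.c): kcsan_report_set_info() from the racing thread that
 * consumed a watchpoint, kcsan_report_known_origin() from the thread that set
 * up the watchpoint, and kcsan_report_unknown_origin() when a value change was
 * observed but no other access was seen.
 */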
void kcsan_report_set_info(const volatile void *ptr, size_t size, int access_type,
                           int watchpoint_idx)
{
        const struct access_info ai = prepare_access_info(ptr, size, access_type);
        unsigned long flags;

        kcsan_disable_current();
        lockdep_off(); /* See kcsan_report_known_origin(). */

        prepare_report_producer(&flags, &ai, &other_infos[watchpoint_idx]);

        lockdep_on();
        kcsan_enable_current();
}
void kcsan_report_known_origin(const volatile void *ptr, size_t size, int access_type,
                               enum kcsan_value_change value_change, int watchpoint_idx,
                               u64 old, u64 new, u64 mask)
{
        const struct access_info ai = prepare_access_info(ptr, size, access_type);
        struct other_info *other_info = &other_infos[watchpoint_idx];
        unsigned long flags = 0;

        kcsan_disable_current();
        /*
         * Because we may generate reports when we're in scheduler code, the use
         * of printk() could deadlock. Until such time that all printing code
         * called in print_report() is scheduler-safe, accept the risk, and just
         * get our message out. As such, also disable lockdep to hide the
         * warning, and avoid disabling lockdep for the rest of the kernel.
         */
        lockdep_off();

        if (!prepare_report_consumer(&flags, &ai, other_info))
                goto out;
        /*
         * Never report if value_change is FALSE, only when it is
         * either TRUE or MAYBE. In case of MAYBE, further filtering may
         * be done once we know the full stack trace in print_report().
         */
        if (value_change != KCSAN_VALUE_CHANGE_FALSE)
                print_report(value_change, &ai, other_info, old, new, mask);

        release_report(&flags, other_info);
out:
        lockdep_on();
        kcsan_enable_current();
}
void kcsan_report_unknown_origin(const volatile void *ptr, size_t size, int access_type,
                                 u64 old, u64 new, u64 mask)
{
        const struct access_info ai = prepare_access_info(ptr, size, access_type);
        unsigned long flags;

        kcsan_disable_current();
        lockdep_off(); /* See kcsan_report_known_origin(). */

        raw_spin_lock_irqsave(&report_lock, flags);
        print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);
        raw_spin_unlock_irqrestore(&report_lock, flags);

        lockdep_on();
        kcsan_enable_current();
}