Commit | Line | Data |
---|---|---|
d5f177d3 PM |
1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
2 | /* | |
3 | * Read-Copy Update mechanism for mutual exclusion, adapted for tracing. | |
4 | * | |
5 | * Copyright (C) 2020 Paul E. McKenney. | |
6 | */ | |
7 | ||
8 | #ifndef __LINUX_RCUPDATE_TRACE_H | |
9 | #define __LINUX_RCUPDATE_TRACE_H | |
10 | ||
11 | #include <linux/sched.h> | |
12 | #include <linux/rcupdate.h> | |
13 | ||
d5f177d3 PM |
14 | extern struct lockdep_map rcu_trace_lock_map; |
15 | ||
891cd1f9 JK |
16 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
17 | ||
d5f177d3 PM |
/*
 * rcu_read_lock_trace_held - lockdep check for RCU Tasks Trace readers
 *
 * Returns non-zero if lockdep believes the caller is within an RCU
 * Tasks Trace read-side critical section (tracked via the global
 * rcu_trace_lock_map), zero otherwise.  Meaningful only when
 * CONFIG_DEBUG_LOCK_ALLOC is enabled; see the !DEBUG_LOCK_ALLOC stub.
 */
static inline int rcu_read_lock_trace_held(void)
{
	return lock_is_held(&rcu_trace_lock_map);
}
22 | ||
23 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | |
24 | ||
/*
 * Without CONFIG_DEBUG_LOCK_ALLOC there is no lockdep state to consult,
 * so unconditionally report the lock as held.  This keeps debug checks
 * of the form WARN_ON(!rcu_read_lock_trace_held()) from firing falsely
 * when lockdep is not available.
 */
static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}
29 | ||
30 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | |
31 | ||
32 | #ifdef CONFIG_TASKS_TRACE_RCU | |
33 | ||
a5c071cc | 34 | void rcu_read_unlock_trace_special(struct task_struct *t); |
d5f177d3 PM |
35 | |
/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked on one
 * task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	/*
	 * READ_ONCE()/WRITE_ONCE() because the nesting counter can also be
	 * written by the grace-period machinery (e.g. from an IPI handler
	 * running on this CPU).
	 */
	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
	/*
	 * Compiler barrier: the nesting increment must be visible (to this
	 * CPU's interrupt handlers) before any of the critical section's
	 * accesses are emitted.
	 */
	barrier();
	/*
	 * In the heavyweight-reader configuration, a full memory barrier is
	 * required only when the updater has requested one via .need_mb.
	 */
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers
	rcu_lock_acquire(&rcu_trace_lock_map); // Tell lockdep we hold the "lock".
}
59 | ||
/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking a rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	int nesting;
	struct task_struct *t = current;

	rcu_lock_release(&rcu_trace_lock_map);
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	barrier(); // Critical section before disabling.
	// Disable IPI-based setting of .need_qs.
	// Writing INT_MIN + nesting leaves the counter negative, which the
	// grace-period IPI handler treats as "do not touch this reader".
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
	// Fast path: no special handling requested and/or still nested.
	// Restore the true nesting count and return.
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;  // We assume shallow reader nesting.
	}
	// Slow path: outermost unlock with .s set; report the quiescent
	// state to the grace-period machinery.
	WARN_ON_ONCE(nesting != 0);
	rcu_read_unlock_trace_special(t);
}
86 | ||
87 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); | |
88 | void synchronize_rcu_tasks_trace(void); | |
89 | void rcu_barrier_tasks_trace(void); | |
9667305c AS |
90 | #else |
/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 *
 * Reaching any of these stubs means RCU Tasks Trace was used without
 * CONFIG_TASKS_TRACE_RCU, hence the unconditional BUG().
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
d5f177d3 PM |
98 | #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
99 | ||
100 | #endif /* __LINUX_RCUPDATE_TRACE_H */ |