#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H

#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>

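/*
 * Initializer for statically allocated mm_structs; for example, init_mm in
 * mm/init-mm.c uses MMAP_LOCK_INITIALIZER(init_mm) in its initializer list.
 */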
#define MMAP_LOCK_INITIALIZER(name) \
        .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),

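/*
 * Tracepoints covering the mmap_lock lifecycle: a lock attempt starting, the
 * acquire path returning (successfully or not), and the lock being released.
 * The __mmap_lock_do_trace_*() implementations live in mm/mmap_lock.c.
 */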
DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);

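/*
 * With CONFIG_TRACING enabled, each wrapper first checks
 * tracepoint_enabled(), a static-key test, so a disabled tracepoint costs
 * only a predicted branch before the out-of-line trace call.
 */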
#ifdef CONFIG_TRACING

void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
                                           bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                   bool write)
{
        if (tracepoint_enabled(mmap_lock_start_locking))
                __mmap_lock_do_trace_start_locking(mm, write);
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
                                                      bool write, bool success)
{
        if (tracepoint_enabled(mmap_lock_acquire_returned))
                __mmap_lock_do_trace_acquire_returned(mm, write, success);
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
        if (tracepoint_enabled(mmap_lock_released))
                __mmap_lock_do_trace_released(mm, write);
}

#else /* !CONFIG_TRACING */

static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
                                                   bool write)
{
}

static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
                                                      bool write, bool success)
{
}

static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}

#endif /* CONFIG_TRACING */

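/*
 * Assertion helpers: the lockdep_assert_held*() checks catch violations on
 * lockdep-enabled kernels, while VM_BUG_ON_MM() also covers CONFIG_DEBUG_VM
 * builds that do not have lockdep.
 */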
static inline void mmap_assert_locked(struct mm_struct *mm)
{
        lockdep_assert_held(&mm->mmap_lock);
        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
        lockdep_assert_held_write(&mm->mmap_lock);
        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}

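/*
 * With CONFIG_PER_VMA_LOCK, a VMA is write-locked by copying the current
 * mm->mm_lock_seq into vma->vm_lock_seq (see vma_start_write()). Bumping
 * mm_lock_seq here therefore releases every per-VMA write lock taken during
 * this mmap_lock write cycle in a single step.
 */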
#ifdef CONFIG_PER_VMA_LOCK
static inline void vma_end_write_all(struct mm_struct *mm)
{
        mmap_assert_write_locked(mm);
        /* No races during update due to exclusive mmap_lock being held */
        WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
#else
static inline void vma_end_write_all(struct mm_struct *mm) {}
#endif

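/* Set up the rwsem for a freshly allocated mm (called from mm_init()). */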
static inline void mmap_init_lock(struct mm_struct *mm)
{
        init_rwsem(&mm->mmap_lock);
}

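/*
 * Write-side acquisition. Each variant brackets the rwsem operation with the
 * trace hooks above. An illustrative caller pattern (a sketch, not taken
 * from this file):
 *
 *      if (mmap_write_lock_killable(mm))
 *              return -EINTR;
 *      ... modify the VMA tree ...
 *      mmap_write_unlock(mm);
 */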
static inline void mmap_write_lock(struct mm_struct *mm)
{
        __mmap_lock_trace_start_locking(mm, true);
        down_write(&mm->mmap_lock);
        __mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
        __mmap_lock_trace_start_locking(mm, true);
        down_write_nested(&mm->mmap_lock, subclass);
        __mmap_lock_trace_acquire_returned(mm, true, true);
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
        int ret;

        __mmap_lock_trace_start_locking(mm, true);
        ret = down_write_killable(&mm->mmap_lock);
        __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
        return ret;
}

static inline bool mmap_write_trylock(struct mm_struct *mm)
{
        bool ret;

        __mmap_lock_trace_start_locking(mm, true);
        ret = down_write_trylock(&mm->mmap_lock) != 0;
        __mmap_lock_trace_acquire_returned(mm, true, ret);
        return ret;
}

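/*
 * Release the write lock. vma_end_write_all() must run before up_write():
 * it asserts that the mmap_lock is still write-locked while it bumps
 * mm_lock_seq.
 */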
static inline void mmap_write_unlock(struct mm_struct *mm)
{
        __mmap_lock_trace_released(mm, true);
        vma_end_write_all(mm);
        up_write(&mm->mmap_lock);
}

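/*
 * Atomically convert a held write lock into a read lock without dropping
 * it; the trace hook records this as a successful read acquisition.
 * Per-VMA write locks are released here, just as on a full unlock.
 */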
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
        __mmap_lock_trace_acquire_returned(mm, false, true);
        vma_end_write_all(mm);
        downgrade_write(&mm->mmap_lock);
}

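/*
 * Read-side acquisition, following the same pattern as the write side.
 * Illustrative lookup under the read lock (a sketch, not from this file):
 *
 *      if (mmap_read_lock_killable(mm))
 *              return -EINTR;
 *      vma = find_vma(mm, addr);
 *      ... inspect vma ...
 *      mmap_read_unlock(mm);
 */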
static inline void mmap_read_lock(struct mm_struct *mm)
{
        __mmap_lock_trace_start_locking(mm, false);
        down_read(&mm->mmap_lock);
        __mmap_lock_trace_acquire_returned(mm, false, true);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
        int ret;

        __mmap_lock_trace_start_locking(mm, false);
        ret = down_read_killable(&mm->mmap_lock);
        __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
        return ret;
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
        bool ret;

        __mmap_lock_trace_start_locking(mm, false);
        ret = down_read_trylock(&mm->mmap_lock) != 0;
        __mmap_lock_trace_acquire_returned(mm, false, ret);
        return ret;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
        __mmap_lock_trace_released(mm, false);
        up_read(&mm->mmap_lock);
}

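/*
 * Variant for when the read lock is released by a context other than the
 * one that acquired it; up_read_non_owner() skips lockdep's owner tracking.
 */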
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
        __mmap_lock_trace_released(mm, false);
        up_read_non_owner(&mm->mmap_lock);
}

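/*
 * Report whether other tasks are blocked on the lock, so long-running
 * holders can decide to drop it and let waiters through.
 */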
static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
        return rwsem_is_contended(&mm->mmap_lock);
}

#endif /* _LINUX_MMAP_LOCK_H */