// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include <trace/events/mmap_lock.h>

#include <linux/mm.h>
#include <linux/cgroup.h>
#include <linux/memcontrol.h>
#include <linux/mmap_lock.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/trace_events.h>
#include <linux/local_lock.h>

EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);

#ifdef CONFIG_MEMCG

/*
 * Our various events all share the same buffer (because we don't want or need
 * to allocate a set of buffers *per event type*), so we need to protect against
 * concurrent _reg() and _unreg() calls, and count how many _reg() calls have
 * been made.
 */
static DEFINE_MUTEX(reg_lock);
static int reg_refcount; /* Protected by reg_lock. */

/*
 * Size of the buffer for memcg path names. Ignoring stack trace support,
 * trace_events_hist.c uses MAX_FILTER_STR_VAL for this, so we also use it.
 */
#define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL

/*
 * How many contexts our trace events might be called in: normal, softirq, irq,
 * and NMI.
 */
#define CONTEXT_COUNT 4

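/*
 * Per-CPU buffer state: @buf holds CONTEXT_COUNT slices of
 * MEMCG_PATH_BUF_SIZE bytes each, @buf_idx is the byte offset of the next
 * free slice, and @lock serializes users of the buffer on this CPU (see
 * TRACE_MMAP_LOCK_EVENT below).
 */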
struct memcg_path {
	local_lock_t lock;
	char __rcu *buf;
	local_t buf_idx;
};
static DEFINE_PER_CPU(struct memcg_path, memcg_paths) = {
	.lock = INIT_LOCAL_LOCK(lock),
	.buf_idx = LOCAL_INIT(0),
};

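/*
 * Scratch array (one entry per possible CPU) used by free_memcg_path_bufs()
 * to stash the old buffer pointers across synchronize_rcu() before freeing
 * them.
 */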
static char **tmp_bufs;

/* Called with reg_lock held. */
static void free_memcg_path_bufs(void)
{
	struct memcg_path *memcg_path;
	int cpu;
	char **old = tmp_bufs;

	for_each_possible_cpu(cpu) {
		memcg_path = per_cpu_ptr(&memcg_paths, cpu);
		*(old++) = rcu_dereference_protected(memcg_path->buf,
			lockdep_is_held(&reg_lock));
		rcu_assign_pointer(memcg_path->buf, NULL);
	}

	/* Wait for inflight memcg_path_buf users to finish. */
	synchronize_rcu();

	old = tmp_bufs;
	for_each_possible_cpu(cpu) {
		kfree(*(old++));
	}

	kfree(tmp_bufs);
	tmp_bufs = NULL;
}

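/*
 * Registration callback: on the first _reg() call (refcount 0->1), allocate
 * one path buffer per possible CPU and publish it via RCU.
 */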
int trace_mmap_lock_reg(void)
{
	int cpu;
	char *new;

	mutex_lock(&reg_lock);

	/* If the refcount is going 0->1, proceed with allocating buffers. */
	if (reg_refcount++)
		goto out;

	tmp_bufs = kmalloc_array(num_possible_cpus(), sizeof(*tmp_bufs),
				 GFP_KERNEL);
	if (tmp_bufs == NULL)
		goto out_fail;

	for_each_possible_cpu(cpu) {
		new = kmalloc(MEMCG_PATH_BUF_SIZE * CONTEXT_COUNT, GFP_KERNEL);
		if (new == NULL)
			goto out_fail_free;
		rcu_assign_pointer(per_cpu_ptr(&memcg_paths, cpu)->buf, new);
		/* Don't need to wait for inflights, they'd have gotten NULL. */
	}

out:
	mutex_unlock(&reg_lock);
	return 0;

out_fail_free:
	free_memcg_path_bufs();
out_fail:
	/* Since we failed, undo the earlier ref increment. */
	--reg_refcount;

	mutex_unlock(&reg_lock);
	return -ENOMEM;
}

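/*
 * Unregistration callback: on the last _unreg() call (refcount 1->0), tear
 * the per-CPU buffers down again.
 */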
void trace_mmap_lock_unreg(void)
{
	mutex_lock(&reg_lock);

	/* If the refcount is going 1->0, proceed with freeing buffers. */
	if (--reg_refcount)
		goto out;

	free_memcg_path_bufs();

out:
	mutex_unlock(&reg_lock);
}

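/*
 * Reserve the next MEMCG_PATH_BUF_SIZE slice of this CPU's buffer (there is
 * room for up to CONTEXT_COUNT nested callers), or return NULL if the
 * buffers have already been torn down. On success the RCU read lock is held
 * until put_memcg_path_buf() is called.
 */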
static inline char *get_memcg_path_buf(void)
{
	struct memcg_path *memcg_path = this_cpu_ptr(&memcg_paths);
	char *buf;
	int idx;

	rcu_read_lock();
	buf = rcu_dereference(memcg_path->buf);
	if (buf == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	idx = local_add_return(MEMCG_PATH_BUF_SIZE, &memcg_path->buf_idx) -
	      MEMCG_PATH_BUF_SIZE;
	return &buf[idx];
}

static inline void put_memcg_path_buf(void)
{
	local_sub(MEMCG_PATH_BUF_SIZE, &this_cpu_ptr(&memcg_paths)->buf_idx);
	rcu_read_unlock();
}

/*
 * Write the given mm_struct's memcg path to a percpu buffer, and return a
 * pointer to it. If the path cannot be determined, or no buffer was available
 * (because the trace event is being unregistered), NULL is returned.
 *
 * Note: buffers are allocated per-cpu to avoid locking, so preemption must be
 * disabled by the caller before calling us, and re-enabled only after the
 * caller is done with the pointer.
 *
 * The caller must call put_memcg_path_buf() once the buffer is no longer
 * needed. This must be done while preemption is still disabled.
 */
static const char *get_mm_memcg_path(struct mm_struct *mm)
{
	char *buf = NULL;
	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);

	if (memcg == NULL)
		goto out;
	if (unlikely(memcg->css.cgroup == NULL))
		goto out_put;

	buf = get_memcg_path_buf();
	if (buf == NULL)
		goto out_put;

	cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);

out_put:
	css_put(&memcg->css);
out:
	return buf;
}

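/*
 * Emit the trace_mmap_lock_##type event with the current task's memcg path,
 * or "" if no path could be resolved. The per-CPU local_lock is held around
 * the lookup so the buffer slice returned by get_mm_memcg_path() stays
 * reserved until put_memcg_path_buf() releases it.
 */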
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
	do {                                                                   \
		const char *memcg_path;                                        \
		local_lock(&memcg_paths.lock);                                 \
		memcg_path = get_mm_memcg_path(mm);                            \
		trace_mmap_lock_##type(mm,                                     \
				       memcg_path != NULL ? memcg_path : "",   \
				       ##__VA_ARGS__);                         \
		if (likely(memcg_path != NULL))                                \
			put_memcg_path_buf();                                  \
		local_unlock(&memcg_paths.lock);                               \
	} while (0)

#else /* !CONFIG_MEMCG */

int trace_mmap_lock_reg(void)
{
	return 0;
}

void trace_mmap_lock_unreg(void)
{
}

#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \
	trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)

#endif /* CONFIG_MEMCG */

/*
 * Trace calls must be in a separate file, as otherwise there's a circular
 * dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
 */

void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(start_locking, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking);

void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success)
{
	TRACE_MMAP_LOCK_EVENT(acquire_returned, mm, write, success);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned);

void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(released, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);