// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/user_events.h>
#include "trace_dynevent.h"
#include "trace_output.h"
#include "trace.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/* Limit how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->reg_name)
#define EVENT_TP_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024

/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)

/*
 * Stores the system name, tables, and locks for a group of events. This
 * allows isolation for events by various means.
 */
struct user_event_group {
	char *system_name;
	char *system_multi_name;
	struct hlist_node node;
	struct mutex reg_mutex;
	DECLARE_HASHTABLE(register_table, 8);
	/* ID that moves forward within the group for multi-event names */
	u64 multi_id;
};

/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/* Max allowed events for the whole system */
static unsigned int max_user_events = 32768;

/* Current number of events on the whole system */
static unsigned int current_user_events;

/*
 * Stores per-event properties. As users register events within a file,
 * a user_event might be created if it does not already exist. These are
 * globally used and their lifetime is tied to the refcnt member. These
 * cannot go away until the refcnt reaches one.
 */
struct user_event {
	struct user_event_group *group;
	char *reg_name;
	struct tracepoint tracepoint;
	struct trace_event_call call;
	struct trace_event_class class;
	struct dyn_event devent;
	struct hlist_node node;
	struct list_head fields;
	struct list_head validators;
	struct work_struct put_work;
	refcount_t refcnt;
	int min_size;
	int reg_flags;
	char status;
};

/*
 * Stores per-mm/event properties that enable an address to be
 * updated properly for each task. As tasks are forked, we use
 * these to track enablement sites that are tied to an event.
 */
struct user_event_enabler {
	struct list_head mm_enablers_link;
	struct user_event *event;
	unsigned long addr;

	/* Track enable bit, flags, etc. Aligned for bitops. */
	unsigned long values;
};

/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
#define ENABLE_VAL_BIT_MASK 0x3F

/* Bit 6 is for faulting status of enablement */
#define ENABLE_VAL_FAULTING_BIT 6

/* Bit 7 is for freeing status of enablement */
#define ENABLE_VAL_FREEING_BIT 7

/* Bit 8 is for marking 32-bit on 64-bit */
#define ENABLE_VAL_32_ON_64_BIT 8

#define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)

/* Only duplicate the bit and compat values */
#define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)

#define ENABLE_BITOPS(e) (&(e)->values)

#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))

#define EVENT_MULTI_FORMAT(f) ((f) & USER_EVENT_REG_MULTI_FORMAT)

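/*
 * For reference, a summary of how the enabler 'values' word above is
 * laid out (derived from the defines, not an additional ABI):
 *
 *   bits 0-5: bit number to set/clear in the user-space enable word
 *   bit  6:   a fault-in of the user page is currently pending
 *   bit  7:   the enabler is being freed, skip any further writes
 *   bit  8:   the process registered a 32-bit word on a 64-bit kernel
 */
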
/* Used for asynchronous faulting in of pages */
struct user_event_enabler_fault {
	struct work_struct work;
	struct user_event_mm *mm;
	struct user_event_enabler *enabler;
	int attempt;
};

static struct kmem_cache *fault_cache;

/* Global list of memory descriptors using user_events */
static LIST_HEAD(user_event_mms);
static DEFINE_SPINLOCK(user_event_mms_lock);

/*
 * Stores per-file events references. As users register events within
 * a file, this structure is modified and freed via RCU.
 * The lifetime of this struct is tied to the lifetime of the file.
 * These are not shared and only accessible by the file that created it.
 */
struct user_event_refs {
	struct rcu_head rcu;
	int count;
	struct user_event *events[];
};

struct user_event_file_info {
	struct user_event_group *group;
	struct user_event_refs *refs;
};

#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
	struct list_head user_event_link;
	int offset;
	int flags;
};

static inline void align_addr_bit(unsigned long *addr, int *bit,
				  unsigned long *flags)
{
	if (IS_ALIGNED(*addr, sizeof(long))) {
#ifdef __BIG_ENDIAN
		/* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
		if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
			*bit += 32;
#endif
		return;
	}

	*addr = ALIGN_DOWN(*addr, sizeof(long));

	/*
	 * We only support 32 and 64 bit values. The only time we need
	 * to align is a 32 bit value on a 64 bit kernel, which on LE
	 * is always 32 bits, and on BE requires no change when unaligned.
	 */
#ifdef __LITTLE_ENDIAN
	*bit += 32;
#endif
}
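
/*
 * Worked example (illustrative, not from the original source): on a
 * 64-bit LE kernel, a 4-byte enable word at uaddr 0x1004 with bit 1 is
 * not long-aligned, so the address above rounds down to 0x1000 and the
 * bit becomes 33, selecting the same physical bit within the aligned
 * 8-byte word that set_bit()/clear_bit() later operate on.
 */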

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
				   void *tpdata, bool *faulted);

static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser, int reg_flags);

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
static void user_event_mm_put(struct user_event_mm *mm);
static int destroy_user_event(struct user_event *user);
static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv);

static u32 user_event_key(char *name)
{
	return jhash(name, strlen(name), 0);
}

static bool user_event_capable(u16 reg_flags)
{
	/* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
	if (reg_flags & USER_EVENT_REG_PERSIST) {
		if (!perfmon_capable())
			return false;
	}

	return true;
}

static struct user_event *user_event_get(struct user_event *user)
{
	refcount_inc(&user->refcnt);

	return user;
}

static void delayed_destroy_user_event(struct work_struct *work)
{
	struct user_event *user = container_of(
		work, struct user_event, put_work);

	mutex_lock(&event_mutex);

	if (!refcount_dec_and_test(&user->refcnt))
		goto out;

	if (destroy_user_event(user)) {
		/*
		 * The only reason this would fail here is if we cannot
		 * update the visibility of the event. In this case the
		 * event stays in the hashtable, waiting for someone to
		 * attempt to delete it later.
		 */
		pr_warn("user_events: Unable to delete event\n");
		refcount_set(&user->refcnt, 1);
	}
out:
	mutex_unlock(&event_mutex);
}

static void user_event_put(struct user_event *user, bool locked)
{
	bool delete;

	if (unlikely(!user))
		return;

	/*
	 * When the event is not enabled for auto-delete there will always
	 * be at least 1 reference to the event. During the event creation
	 * we initially set the refcnt to 2 to achieve this. In those cases
	 * the caller must acquire event_mutex and after decrement check if
	 * the refcnt is 1, meaning this is the last reference. When auto
	 * delete is enabled, there will only be 1 ref, i.e. the refcnt will
	 * only be set to 1 during creation to allow the below checks to go
	 * through upon the last put. The last put must always be done with
	 * the event mutex held.
	 */
	if (!locked) {
		lockdep_assert_not_held(&event_mutex);
		delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
	} else {
		lockdep_assert_held(&event_mutex);
		delete = refcount_dec_and_test(&user->refcnt);
	}

	if (!delete)
		return;

	/*
	 * We now have the event_mutex in all cases, which ensures that
	 * no new references will be taken until event_mutex is released.
	 * New references come through find_user_event(), which requires
	 * the event_mutex to be held.
	 */

	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
		/* We should not get here when the persist flag is set */
		pr_alert("BUG: Auto-delete engaged on persistent event\n");
		goto out;
	}

	/*
	 * Unfortunately we have to attempt the actual destroy in a work
	 * queue. This is because not all cases handle a trace_event_call
	 * being removed within the class->reg() operation for unregister.
	 */
	INIT_WORK(&user->put_work, delayed_destroy_user_event);

	/*
	 * Since the event is still in the hashtable, we have to re-inc
	 * the ref count to 1. This count will be decremented and checked
	 * in the work queue to ensure it's still the last ref. This is
	 * needed because a user-process could register the same event in
	 * between the time of event_mutex release and the work queue
	 * running the delayed destroy. If we removed the item now from
	 * the hashtable, this would result in a timing window where a
	 * user process would fail a register because the trace_event_call
	 * register would fail in the tracing layers.
	 */
	refcount_set(&user->refcnt, 1);

	if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
		/*
		 * If we fail we must wait for an admin to attempt delete or
		 * another register/close of the event, whichever is first.
		 */
		pr_warn("user_events: Unable to queue delayed destroy\n");
	}
out:
	/* If we didn't hold event_mutex on entry, unlock it now */
	if (!locked)
		mutex_unlock(&event_mutex);
}

static void user_event_group_destroy(struct user_event_group *group)
{
	kfree(group->system_name);
	kfree(group->system_multi_name);
	kfree(group);
}

static char *user_event_group_system_name(void)
{
	char *system_name;
	int len = sizeof(USER_EVENTS_SYSTEM) + 1;

	system_name = kmalloc(len, GFP_KERNEL);

	if (!system_name)
		return NULL;

	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

	return system_name;
}

static char *user_event_group_system_multi_name(void)
{
	return kstrdup(USER_EVENTS_MULTI_SYSTEM, GFP_KERNEL);
}

static struct user_event_group *current_user_event_group(void)
{
	return init_group;
}

static struct user_event_group *user_event_group_create(void)
{
	struct user_event_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);

	if (!group)
		return NULL;

	group->system_name = user_event_group_system_name();

	if (!group->system_name)
		goto error;

	group->system_multi_name = user_event_group_system_multi_name();

	if (!group->system_multi_name)
		goto error;

	mutex_init(&group->reg_mutex);
	hash_init(group->register_table);

	return group;
error:
	if (group)
		user_event_group_destroy(group);

	return NULL;
}

static void user_event_enabler_destroy(struct user_event_enabler *enabler,
				       bool locked)
{
	list_del_rcu(&enabler->mm_enablers_link);

	/* No longer tracking the event via the enabler */
	user_event_put(enabler->event, locked);

	kfree(enabler);
}

static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
				  int attempt)
{
	bool unlocked;
	int ret;

	/*
	 * Normally this is low; ensure that it cannot be taken advantage
	 * of by bad user processes to cause excessive looping.
	 */
	if (attempt > 10)
		return -EFAULT;

	mmap_read_lock(mm->mm);

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
			       &unlocked);
out:
	mmap_read_unlock(mm->mm);

	return ret;
}

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt);

static void user_event_enabler_fault_fixup(struct work_struct *work)
{
	struct user_event_enabler_fault *fault = container_of(
		work, struct user_event_enabler_fault, work);
	struct user_event_enabler *enabler = fault->enabler;
	struct user_event_mm *mm = fault->mm;
	unsigned long uaddr = enabler->addr;
	int attempt = fault->attempt;
	int ret;

	ret = user_event_mm_fault_in(mm, uaddr, attempt);

	if (ret && ret != -ENOENT) {
		struct user_event *user = enabler->event;

		pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
			mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
	}

	/* Prevent state changes from racing */
	mutex_lock(&event_mutex);

	/* User asked for enabler to be removed during fault */
	if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
		user_event_enabler_destroy(enabler, true);
		goto out;
	}

	/*
	 * If we managed to get the page, re-issue the write. We do not
	 * want to get into a possible infinite loop, which is why we only
	 * attempt again directly if the page came in. If we couldn't get
	 * the page here, then we will try again the next time the event is
	 * enabled/disabled.
	 */
	clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!ret) {
		mmap_read_lock(mm->mm);
		user_event_enabler_write(mm, enabler, true, &attempt);
		mmap_read_unlock(mm->mm);
	}
out:
	mutex_unlock(&event_mutex);

	/* In all cases we no longer need the mm or fault */
	user_event_mm_put(mm);
	kmem_cache_free(fault_cache, fault);
}

static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
					   struct user_event_enabler *enabler,
					   int attempt)
{
	struct user_event_enabler_fault *fault;

	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);

	if (!fault)
		return false;

	INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
	fault->mm = user_event_mm_get(mm);
	fault->enabler = enabler;
	fault->attempt = attempt;

	/* Don't try to queue in again while we have a pending fault */
	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!schedule_work(&fault->work)) {
		/* Allow another attempt later */
		clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

		user_event_mm_put(mm);
		kmem_cache_free(fault_cache, fault);

		return false;
	}

	return true;
}

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt)
{
	unsigned long uaddr = enabler->addr;
	unsigned long *ptr;
	struct page *page;
	void *kaddr;
	int bit = ENABLE_BIT(enabler);
	int ret;

	lockdep_assert_held(&event_mutex);
	mmap_assert_locked(mm->mm);

	*attempt += 1;

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0)
		return -ENOENT;

	if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
		     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
		return -EBUSY;

	align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));

	ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
				    &page, NULL);

	if (unlikely(ret <= 0)) {
		if (!fixup_fault)
			return -EFAULT;

		if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
			pr_warn("user_events: Unable to queue fault handler\n");

		return -EFAULT;
	}

	kaddr = kmap_local_page(page);
	ptr = kaddr + (uaddr & ~PAGE_MASK);

	/* Update bit atomically, user tracers must be atomic as well */
	if (enabler->event && enabler->event->status)
		set_bit(bit, ptr);
	else
		clear_bit(bit, ptr);

	kunmap_local(kaddr);
	unpin_user_pages_dirty_lock(&page, 1, true);

	return 0;
}

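/*
 * To illustrate the write path above (a sketch of the flow, not part of
 * the original source): the kernel pins the backing page with
 * pin_user_pages_remote(), maps it with kmap_local_page(), and flips
 * only the registered bit with set_bit()/clear_bit(), so readers in
 * user space and other enablers sharing the same word never observe a
 * torn update.
 */
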
static bool user_event_enabler_exists(struct user_event_mm *mm,
				      unsigned long uaddr, unsigned char bit)
{
	struct user_event_enabler *enabler;

	list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
		if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
			return true;
	}

	return false;
}

static void user_event_enabler_update(struct user_event *user)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *next;
	struct user_event_mm *mm;
	int attempt;

	lockdep_assert_held(&event_mutex);

	/*
	 * We need to build a one-shot list of all the mms that have an
	 * enabler for the user_event passed in. This list is only valid
	 * while holding the event_mutex. The only reason for this is due
	 * to the global mm list being RCU protected and we use methods
	 * which can wait (mmap_read_lock and pin_user_pages_remote).
	 *
	 * NOTE: user_event_mm_get_all() increments the ref count of each
	 * mm that is added to the list to prevent removal timing windows.
	 * We must always put each mm after they are used, which may wait.
	 */
	mm = user_event_mm_get_all(user);

	while (mm) {
		next = mm->next;
		mmap_read_lock(mm->mm);

		list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
			if (enabler->event == user) {
				attempt = 0;
				user_event_enabler_write(mm, enabler, true, &attempt);
			}
		}

		mmap_read_unlock(mm->mm);
		user_event_mm_put(mm);
		mm = next;
	}
}

static bool user_event_enabler_dup(struct user_event_enabler *orig,
				   struct user_event_mm *mm)
{
	struct user_event_enabler *enabler;

	/* Skip pending frees */
	if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
		return true;

	enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);

	if (!enabler)
		return false;

	enabler->event = user_event_get(orig->event);
	enabler->addr = orig->addr;

	/* Only dup part of value (ignore future flags, etc) */
	enabler->values = orig->values & ENABLE_VAL_DUP_MASK;

	/* Enablers not exposed yet, RCU not required */
	list_add(&enabler->mm_enablers_link, &mm->enablers);

	return true;
}

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
{
	refcount_inc(&mm->refcnt);

	return mm;
}

static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
{
	struct user_event_mm *found = NULL;
	struct user_event_enabler *enabler;
	struct user_event_mm *mm;

	/*
	 * We use the mm->next field to build a one-shot list from the global
	 * RCU protected list. To build this list the event_mutex must be held.
	 * This lets us build a list without requiring allocs that could fail
	 * when user based events are most wanted for diagnostics.
	 */
	lockdep_assert_held(&event_mutex);

	/*
	 * We do not want to block fork/exec while enablements are being
	 * updated, so we use RCU to walk the current tasks that have used
	 * user_events ABI for 1 or more events. Each enabler found in each
	 * task that matches the event being updated has a write to reflect
	 * the kernel state back into the process. Waits/faults must not occur
	 * during this. So we scan the list under RCU for all the mm that have
	 * the event within it. This is needed because mmap_read_lock() can wait.
	 * Each user mm returned has a ref inc to handle remove RCU races.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
		list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
			if (enabler->event == user) {
				mm->next = found;
				found = user_event_mm_get(mm);
				break;
			}
		}
	}

	rcu_read_unlock();

	return found;
}

static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
{
	struct user_event_mm *user_mm;

	user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);

	if (!user_mm)
		return NULL;

	user_mm->mm = t->mm;
	INIT_LIST_HEAD(&user_mm->enablers);
	refcount_set(&user_mm->refcnt, 1);
	refcount_set(&user_mm->tasks, 1);

	/*
	 * The lifetime of the memory descriptor can slightly outlast
	 * the task lifetime if a ref to the user_event_mm is taken
	 * between list_del_rcu() and call_rcu(). Therefore we need
	 * to take a reference to it to ensure it can live this long
	 * under this corner case. This can also occur in clones that
	 * outlast the parent.
	 */
	mmgrab(user_mm->mm);

	return user_mm;
}

static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_add_rcu(&user_mm->mms_link, &user_event_mms);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	t->user_event_mm = user_mm;
}

static struct user_event_mm *current_user_event_mm(void)
{
	struct user_event_mm *user_mm = current->user_event_mm;

	if (user_mm)
		goto inc;

	user_mm = user_event_mm_alloc(current);

	if (!user_mm)
		goto error;

	user_event_mm_attach(user_mm, current);
inc:
	refcount_inc(&user_mm->refcnt);
error:
	return user_mm;
}

static void user_event_mm_destroy(struct user_event_mm *mm)
{
	struct user_event_enabler *enabler, *next;

	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
		user_event_enabler_destroy(enabler, false);

	mmdrop(mm->mm);
	kfree(mm);
}

static void user_event_mm_put(struct user_event_mm *mm)
{
	if (mm && refcount_dec_and_test(&mm->refcnt))
		user_event_mm_destroy(mm);
}

static void delayed_user_event_mm_put(struct work_struct *work)
{
	struct user_event_mm *mm;

	mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
	user_event_mm_put(mm);
}

void user_event_mm_remove(struct task_struct *t)
{
	struct user_event_mm *mm;
	unsigned long flags;

	might_sleep();

	mm = t->user_event_mm;
	t->user_event_mm = NULL;

	/* Clone will increment the tasks, only remove if last clone */
	if (!refcount_dec_and_test(&mm->tasks))
		return;

	/* Remove the mm from the list, so it can no longer be enabled */
	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_del_rcu(&mm->mms_link);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	/*
	 * We need to wait for currently occurring writes to stop within
	 * the mm. This is required since exit_mm() snaps the current rss
	 * stats and clears them. On the final mmdrop(), check_mm() will
	 * report a bug if these increment.
	 *
	 * All writes/pins are done under mmap_read lock, take the write
	 * lock to ensure in-progress faults have completed. Faults that
	 * are pending but yet to run will check the task count and skip
	 * the fault since the mm is going away.
	 */
	mmap_write_lock(mm->mm);
	mmap_write_unlock(mm->mm);

	/*
	 * Put for mm must be done after RCU delay to handle new refs in
	 * between the list_del_rcu() and now. This ensures any get refs
	 * during rcu_read_lock() are accounted for during list removal.
	 *
	 * CPU A                      |  CPU B
	 * ---------------------------------------------------------------
	 * user_event_mm_remove()     |  rcu_read_lock();
	 *  list_del_rcu()            |   list_for_each_entry_rcu();
	 *  call_rcu()                |   refcount_inc();
	 *  .                         |  rcu_read_unlock();
	 *  schedule_work()           |  .
	 *   user_event_mm_put()      |  .
	 *
	 * mmdrop() cannot be called in the softirq context of call_rcu()
	 * so we use a work queue after call_rcu() to run within.
	 */
	INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
	queue_rcu_work(system_wq, &mm->put_rwork);
}

void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
{
	struct user_event_mm *mm = user_event_mm_alloc(t);
	struct user_event_enabler *enabler;

	if (!mm)
		return;

	rcu_read_lock();

	list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
		if (!user_event_enabler_dup(enabler, mm))
			goto error;
	}

	rcu_read_unlock();

	user_event_mm_attach(mm, t);
	return;
error:
	rcu_read_unlock();
	user_event_mm_destroy(mm);
}

static bool current_user_event_enabler_exists(unsigned long uaddr,
					      unsigned char bit)
{
	struct user_event_mm *user_mm = current_user_event_mm();
	bool exists;

	if (!user_mm)
		return false;

	exists = user_event_enabler_exists(user_mm, uaddr, bit);

	user_event_mm_put(user_mm);

	return exists;
}

static struct user_event_enabler
*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
			   int *write_result)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *user_mm;
	unsigned long uaddr = (unsigned long)reg->enable_addr;
	int attempt = 0;

	user_mm = current_user_event_mm();

	if (!user_mm)
		return NULL;

	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);

	if (!enabler)
		goto out;

	enabler->event = user;
	enabler->addr = uaddr;
	enabler->values = reg->enable_bit;

#if BITS_PER_LONG >= 64
	if (reg->enable_size == 4)
		set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
#endif

retry:
	/* Prevents state changes from racing with new enablers */
	mutex_lock(&event_mutex);

	/* Attempt to reflect the current state within the process */
	mmap_read_lock(user_mm->mm);
	*write_result = user_event_enabler_write(user_mm, enabler, false,
						 &attempt);
	mmap_read_unlock(user_mm->mm);

	/*
	 * If the write works, then we will track the enabler. A ref to the
	 * underlying user_event is held by the enabler to prevent it going
	 * away while the enabler is still in use by a process. The ref is
	 * removed when the enabler is destroyed. This means an event cannot
	 * be forcefully deleted from the system until all tasks using it
	 * exit or run exec(), which includes forks and clones.
	 */
	if (!*write_result) {
		user_event_get(user);
		list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
	}

	mutex_unlock(&event_mutex);

	if (*write_result) {
		/* Attempt to fault-in and retry if it worked */
		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
			goto retry;

		kfree(enabler);
		enabler = NULL;
	}
out:
	user_event_mm_put(user_mm);

	return enabler;
}

static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
	int last = 0;

	if (user->reg_flags & USER_EVENT_REG_PERSIST)
		last = 1;

	return refcount_read(&user->refcnt) == last;
}

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t ret;

	pagefault_disable();

	ret = copy_from_iter_nocache(addr, bytes, i);

	pagefault_enable();

	return ret;
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
	struct user_event *user = (struct user_event *)call->data;

	return &user->fields;
}

/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field with an unsigned int
 * 'id' field after:
 * test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective; they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(struct user_event_group *group,
				char *raw_command, struct user_event **newuser,
				int reg_flags)
{
	char *name = raw_command;
	char *args = strpbrk(name, " ");
	char *flags;

	if (args)
		*args++ = '\0';

	flags = strpbrk(name, ":");

	if (flags)
		*flags++ = '\0';

	return user_event_parse(group, name, args, flags, newuser, reg_flags);
}
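
/*
 * For example (illustrative): the raw command "test:FLAG char[20] msg"
 * is split above into name "test", flags "FLAG", and args "char[20] msg"
 * before being handed to user_event_parse().
 */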

static int user_field_array_size(const char *type)
{
	const char *start = strchr(type, '[');
	char val[8];
	char *bracket;
	int size = 0;

	if (start == NULL)
		return -EINVAL;

	if (strscpy(val, start + 1, sizeof(val)) <= 0)
		return -EINVAL;

	bracket = strchr(val, ']');

	if (!bracket)
		return -EINVAL;

	*bracket = '\0';

	if (kstrtouint(val, 0, &size))
		return -EINVAL;

	if (size > MAX_FIELD_ARRAY_SIZE)
		return -EINVAL;

	return size;
}

static int user_field_size(const char *type)
{
	/* long is not allowed from a user, since it's ambiguous in size */
	if (strcmp(type, "s64") == 0)
		return sizeof(s64);
	if (strcmp(type, "u64") == 0)
		return sizeof(u64);
	if (strcmp(type, "s32") == 0)
		return sizeof(s32);
	if (strcmp(type, "u32") == 0)
		return sizeof(u32);
	if (strcmp(type, "int") == 0)
		return sizeof(int);
	if (strcmp(type, "unsigned int") == 0)
		return sizeof(unsigned int);
	if (strcmp(type, "s16") == 0)
		return sizeof(s16);
	if (strcmp(type, "u16") == 0)
		return sizeof(u16);
	if (strcmp(type, "short") == 0)
		return sizeof(short);
	if (strcmp(type, "unsigned short") == 0)
		return sizeof(unsigned short);
	if (strcmp(type, "s8") == 0)
		return sizeof(s8);
	if (strcmp(type, "u8") == 0)
		return sizeof(u8);
	if (strcmp(type, "char") == 0)
		return sizeof(char);
	if (strcmp(type, "unsigned char") == 0)
		return sizeof(unsigned char);
	if (str_has_prefix(type, "char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "unsigned char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "__data_loc "))
		return sizeof(u32);
	if (str_has_prefix(type, "__rel_loc "))
		return sizeof(u32);

	/* Unknown basic type, error */
	return -EINVAL;
}
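
/*
 * For example (illustrative): user_field_size("u32") returns 4,
 * user_field_size("char[20]") returns 20 via user_field_array_size(),
 * and user_field_size("long") fails with -EINVAL since its size is
 * ambiguous between user and kernel ABIs.
 */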

static void user_event_destroy_validators(struct user_event *user)
{
	struct user_event_validator *validator, *next;
	struct list_head *head = &user->validators;

	list_for_each_entry_safe(validator, next, head, user_event_link) {
		list_del(&validator->user_event_link);
		kfree(validator);
	}
}

static void user_event_destroy_fields(struct user_event *user)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;

	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field);
	}
}

static int user_event_add_field(struct user_event *user, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct user_event_validator *validator;
	struct ftrace_event_field *field;
	int validator_flags = 0;

	field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);

	if (!field)
		return -ENOMEM;

	if (str_has_prefix(type, "__data_loc "))
		goto add_validator;

	if (str_has_prefix(type, "__rel_loc ")) {
		validator_flags |= VALIDATOR_REL;
		goto add_validator;
	}

	goto add_field;

add_validator:
	if (strstr(type, "char") != NULL)
		validator_flags |= VALIDATOR_ENSURE_NULL;

	validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);

	if (!validator) {
		kfree(field);
		return -ENOMEM;
	}

	validator->flags = validator_flags;
	validator->offset = offset;

	/* Want sequential access when validating */
	list_add_tail(&validator->user_event_link, &user->validators);

add_field:
	field->type = type;
	field->name = name;
	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->filter_type = filter_type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);

	list_add(&field->link, &user->fields);

	/*
	 * Min size from user writes that are required, this does not include
	 * the size of trace_entry (common fields).
	 */
	user->min_size = (offset + size) - sizeof(struct trace_entry);

	return 0;
}

/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
				  u32 *offset)
{
	char *part, *type, *name;
	u32 depth = 0, saved_offset = *offset;
	int len, size = -EINVAL;
	bool is_struct = false;

	field = skip_spaces(field);

	if (*field == '\0')
		return 0;

	/* Handle types that have a space within */
	len = str_has_prefix(field, "unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "struct ");
	if (len) {
		is_struct = true;
		goto skip_next;
	}

	len = str_has_prefix(field, "__data_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__data_loc ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc ");
	if (len)
		goto skip_next;

	goto parse;
skip_next:
	type = field;
	field = strpbrk(field + len, " ");

	if (field == NULL)
		return -EINVAL;

	*field++ = '\0';
	depth++;
parse:
	name = NULL;

	while ((part = strsep(&field, " ")) != NULL) {
		switch (depth++) {
		case FIELD_DEPTH_TYPE:
			type = part;
			break;
		case FIELD_DEPTH_NAME:
			name = part;
			break;
		case FIELD_DEPTH_SIZE:
			if (!is_struct)
				return -EINVAL;

			if (kstrtou32(part, 10, &size))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (depth < FIELD_DEPTH_SIZE || !name)
		return -EINVAL;

	if (depth == FIELD_DEPTH_SIZE)
		size = user_field_size(type);

	if (size == 0)
		return -EINVAL;

	if (size < 0)
		return size;

	*offset = saved_offset + size;

	return user_event_add_field(user, type, name, saved_offset, size,
				    type[0] != 'u', FILTER_OTHER);
}

static int user_event_parse_fields(struct user_event *user, char *args)
{
	char *field;
	u32 offset = sizeof(struct trace_entry);
	int ret = -EINVAL;

	if (args == NULL)
		return 0;

	while ((field = strsep(&args, ";")) != NULL) {
		ret = user_event_parse_field(field, user, &offset);

		if (ret)
			break;
	}

	return ret;
}

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
	if (strcmp(type, "s64") == 0)
		return "%lld";
	if (strcmp(type, "u64") == 0)
		return "%llu";
	if (strcmp(type, "s32") == 0)
		return "%d";
	if (strcmp(type, "u32") == 0)
		return "%u";
	if (strcmp(type, "int") == 0)
		return "%d";
	if (strcmp(type, "unsigned int") == 0)
		return "%u";
	if (strcmp(type, "s16") == 0)
		return "%d";
	if (strcmp(type, "u16") == 0)
		return "%u";
	if (strcmp(type, "short") == 0)
		return "%d";
	if (strcmp(type, "unsigned short") == 0)
		return "%u";
	if (strcmp(type, "s8") == 0)
		return "%d";
	if (strcmp(type, "u8") == 0)
		return "%u";
	if (strcmp(type, "char") == 0)
		return "%d";
	if (strcmp(type, "unsigned char") == 0)
		return "%u";
	if (strstr(type, "char[") != NULL)
		return "%s";

	/* Unknown, likely a struct; allowed, treated as 64-bit */
	return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
	if (str_has_prefix(type, "__data_loc ")) {
		*str_func = "__get_str";
		goto check;
	}

	if (str_has_prefix(type, "__rel_loc ")) {
		*str_func = "__get_rel_str";
		goto check;
	}

	return false;
check:
	return strstr(type, "char") != NULL;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
				     char *buf, int len, bool *colon)
{
	int pos = 0, i = *iout;

	*colon = false;

	for (; i < argc; ++i) {
		if (i != *iout)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

		if (strchr(argv[i], ';')) {
			++i;
			*colon = true;
			break;
		}
	}

	/* Actual set, advance i */
	if (len != 0)
		*iout = i;

	return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
				 char *buf, int len, bool colon)
{
	int pos = 0;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

	if (str_has_prefix(field->type, "struct "))
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);

	if (colon)
		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

	return pos + 1;
}

static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
	struct ftrace_event_field *field;
	struct list_head *head = &user->fields;
	int pos = 0, depth = 0;
	const char *str_func;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_reverse(field, head, link) {
		if (depth != 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
				field->name, user_field_format(field->type));

		depth++;
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_reverse(field, head, link) {
		if (user_field_is_dyn_string(field->type, &str_func))
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", %s(%s)", str_func, field->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", field->name);
	}

	return pos + 1;
}
#undef LEN_OR_ZERO
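
/*
 * Worked example (illustrative): for an event registered as
 * "test char[20] msg;unsigned int id", the generated print_fmt is:
 *
 *   "msg=%s id=%u", REC->msg, REC->id
 *
 * Fields are walked in reverse list order because user_event_add_field()
 * prepends to user->fields, which restores declaration order here.
 */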

static int user_event_create_print_fmt(struct user_event *user)
{
	char *print_fmt;
	int len;

	len = user_event_set_print_fmt(user, NULL, 0);

	print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);

	if (!print_fmt)
		return -ENOMEM;

	user_event_set_print_fmt(user, print_fmt, len);

	user->call.print_fmt = print_fmt;

	return 0;
}

static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	return print_event_fields(iter, event);
}

static struct trace_event_functions user_event_funcs = {
	.trace = user_event_print_trace,
};

static int user_event_set_call_visible(struct user_event *user, bool visible)
{
	int ret;
	const struct cred *old_cred;
	struct cred *cred;

	cred = prepare_creds();

	if (!cred)
		return -ENOMEM;

	/*
	 * While by default tracefs is locked down, systems can be configured
	 * to allow user_event files to be less locked down. The extreme case
	 * being "other" has read/write access to user_events_data/status.
	 *
	 * When not locked down, processes may not have permission to
	 * add/remove calls to tracefs themselves. We need to temporarily
	 * switch to the root file permission to allow for this scenario.
	 */
	cred->fsuid = GLOBAL_ROOT_UID;

	old_cred = override_creds(cred);

	if (visible)
		ret = trace_add_event_call(&user->call);
	else
		ret = trace_remove_event_call(&user->call);

	revert_creds(old_cred);
	put_cred(cred);

	return ret;
}

static int destroy_user_event(struct user_event *user)
{
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Must destroy fields before call removal */
	user_event_destroy_fields(user);

	ret = user_event_set_call_visible(user, false);

	if (ret)
		return ret;

	dyn_event_remove(&user->devent);
	hash_del(&user->node);

	user_event_destroy_validators(user);

	/* If we have different names, both must be freed */
	if (EVENT_NAME(user) != EVENT_TP_NAME(user))
		kfree(EVENT_TP_NAME(user));

	kfree(user->call.print_fmt);
	kfree(EVENT_NAME(user));
	kfree(user);

	if (current_user_events > 0)
		current_user_events--;
	else
		pr_alert("BUG: Bad current_user_events\n");

	return ret;
}

static struct user_event *find_user_event(struct user_event_group *group,
					  char *name, int argc, const char **argv,
					  u32 flags, u32 *outkey)
{
	struct user_event *user;
	u32 key = user_event_key(name);

	*outkey = key;

	hash_for_each_possible(group->register_table, user, node, key) {
		/*
		 * Single-format events shouldn't return multi-format
		 * events. Callers expect the underlying tracepoint to match
		 * the name exactly in these cases. Only check like-formats.
		 */
		if (EVENT_MULTI_FORMAT(flags) != EVENT_MULTI_FORMAT(user->reg_flags))
			continue;

		if (strcmp(EVENT_NAME(user), name))
			continue;

		if (user_fields_match(user, argc, argv))
			return user_event_get(user);

		/* Scan others if this is a multi-format event */
		if (EVENT_MULTI_FORMAT(flags))
			continue;

		return ERR_PTR(-EADDRINUSE);
	}

	return NULL;
}

static int user_event_validate(struct user_event *user, void *data, int len)
{
	struct list_head *head = &user->validators;
	struct user_event_validator *validator;
	void *pos, *end = data + len;
	u32 loc, offset, size;

	list_for_each_entry(validator, head, user_event_link) {
		pos = data + validator->offset;

		/* Already done min_size check, no bounds check here */
		loc = *(u32 *)pos;
		offset = loc & 0xffff;
		size = loc >> 16;

		if (likely(validator->flags & VALIDATOR_REL))
			pos += offset + sizeof(loc);
		else
			pos = data + offset;

		pos += size;

		if (unlikely(pos > end))
			return -EFAULT;

		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
			if (unlikely(*(char *)(pos - 1) != '\0'))
				return -EFAULT;
	}

	return 0;
}
1586 | ||
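/*
 * Example (added illustration, not part of the original file): the
 * 32-bit "loc" word checked above packs a dynamic payload reference
 * as (size << 16) | offset; for VALIDATOR_REL the offset is relative
 * to just past the loc word itself. A hypothetical helper building
 * such a value:
 */
static inline u32 user_event_make_rel_loc(u16 size, u16 offset)
{
	/* High 16 bits carry the payload size, low 16 bits the offset */
	return ((u32)size << 16) | offset;
}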
7f5a08c7 BB |
1587 | /* |
1588 | * Writes the user supplied payload out to a trace file. | |
1589 | */ | |
0279400a | 1590 | static void user_event_ftrace(struct user_event *user, struct iov_iter *i, |
2467cda1 | 1591 | void *tpdata, bool *faulted) |
7f5a08c7 BB |
1592 | { |
1593 | struct trace_event_file *file; | |
1594 | struct trace_entry *entry; | |
1595 | struct trace_event_buffer event_buffer; | |
2467cda1 | 1596 | size_t size = sizeof(*entry) + i->count; |
7f5a08c7 BB |
1597 | |
1598 | file = (struct trace_event_file *)tpdata; | |
1599 | ||
1600 | if (!file || | |
1601 | !(file->flags & EVENT_FILE_FL_ENABLED) || | |
1602 | trace_trigger_soft_disabled(file)) | |
1603 | return; | |
1604 | ||
1605 | /* Allocates and fills trace_entry; entry + 1 is the data payload */
2467cda1 | 1606 | entry = trace_event_buffer_reserve(&event_buffer, file, size); |
7f5a08c7 BB |
1607 | |
1608 | if (unlikely(!entry)) | |
1609 | return; | |
1610 | ||
6f05dcab | 1611 | if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i))) |
2467cda1 BB |
1612 | goto discard; |
1613 | ||
1614 | if (!list_empty(&user->validators) && | |
1615 | unlikely(user_event_validate(user, entry, size))) | |
1616 | goto discard; | |
1617 | ||
1618 | trace_event_buffer_commit(&event_buffer); | |
1619 | ||
1620 | return; | |
1621 | discard: | |
1622 | *faulted = true; | |
1623 | __trace_event_discard_commit(event_buffer.buffer, | |
1624 | event_buffer.event); | |
7f5a08c7 BB |
1625 | } |
1626 | ||
3207d045 BB |
1627 | #ifdef CONFIG_PERF_EVENTS |
1628 | /* | |
768c1e7f | 1629 | * Writes the user supplied payload out to the perf ring buffer.
3207d045 | 1630 | */ |
0279400a | 1631 | static void user_event_perf(struct user_event *user, struct iov_iter *i, |
2467cda1 | 1632 | void *tpdata, bool *faulted) |
3207d045 BB |
1633 | { |
1634 | struct hlist_head *perf_head; | |
1635 | ||
3207d045 BB |
1636 | perf_head = this_cpu_ptr(user->call.perf_events); |
1637 | ||
1638 | if (perf_head && !hlist_empty(perf_head)) { | |
1639 | struct trace_entry *perf_entry; | |
1640 | struct pt_regs *regs; | |
0279400a | 1641 | size_t size = sizeof(*perf_entry) + i->count; |
3207d045 BB |
1642 | int context; |
1643 | ||
1644 | perf_entry = perf_trace_buf_alloc(ALIGN(size, 8), | |
1645 | ®s, &context); | |
1646 | ||
1647 | if (unlikely(!perf_entry)) | |
1648 | return; | |
1649 | ||
1650 | perf_fetch_caller_regs(regs); | |
1651 | ||
6f05dcab | 1652 | if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i))) |
2467cda1 BB |
1653 | goto discard; |
1654 | ||
1655 | if (!list_empty(&user->validators) && | |
1656 | unlikely(user_event_validate(user, perf_entry, size))) | |
1657 | goto discard; | |
3207d045 BB |
1658 | |
1659 | perf_trace_buf_submit(perf_entry, size, context, | |
1660 | user->call.event.type, 1, regs, | |
1661 | perf_head, NULL); | |
2467cda1 BB |
1662 | |
1663 | return; | |
1664 | discard: | |
1665 | *faulted = true; | |
1666 | perf_swevent_put_recursion_context(context); | |
3207d045 BB |
1667 | } |
1668 | } | |
1669 | #endif | |
1670 | ||
7f5a08c7 | 1671 | /* |
72357590 | 1672 | * Update the enabled bit among all user processes. |
7f5a08c7 | 1673 | */ |
72357590 | 1674 | static void update_enable_bit_for(struct user_event *user) |
7f5a08c7 BB |
1675 | { |
1676 | struct tracepoint *tp = &user->tracepoint; | |
1677 | char status = 0; | |
1678 | ||
1679 | if (atomic_read(&tp->key.enabled) > 0) { | |
1680 | struct tracepoint_func *probe_func_ptr; | |
1681 | user_event_func_t probe_func; | |
1682 | ||
1683 | rcu_read_lock_sched(); | |
1684 | ||
1685 | probe_func_ptr = rcu_dereference_sched(tp->funcs); | |
1686 | ||
1687 | if (probe_func_ptr) { | |
1688 | do { | |
1689 | probe_func = probe_func_ptr->func; | |
1690 | ||
1691 | if (probe_func == user_event_ftrace) | |
1692 | status |= EVENT_STATUS_FTRACE; | |
3207d045 BB |
1693 | #ifdef CONFIG_PERF_EVENTS |
1694 | else if (probe_func == user_event_perf) | |
1695 | status |= EVENT_STATUS_PERF; | |
1696 | #endif | |
7f5a08c7 BB |
1697 | else |
1698 | status |= EVENT_STATUS_OTHER; | |
1699 | } while ((++probe_func_ptr)->func); | |
1700 | } | |
1701 | ||
1702 | rcu_read_unlock_sched(); | |
1703 | } | |
1704 | ||
39d6d08b | 1705 | user->status = status; |
72357590 BB |
1706 | |
1707 | user_event_enabler_update(user); | |
7f5a08c7 BB |
1708 | } |
1709 | ||
1710 | /* | |
1711 | * Register callback for our events from tracing sub-systems. | |
1712 | */ | |
1713 | static int user_event_reg(struct trace_event_call *call, | |
1714 | enum trace_reg type, | |
1715 | void *data) | |
1716 | { | |
1717 | struct user_event *user = (struct user_event *)call->data; | |
1718 | int ret = 0; | |
1719 | ||
1720 | if (!user) | |
1721 | return -ENOENT; | |
1722 | ||
1723 | switch (type) { | |
1724 | case TRACE_REG_REGISTER: | |
1725 | ret = tracepoint_probe_register(call->tp, | |
1726 | call->class->probe, | |
1727 | data); | |
1728 | if (!ret) | |
1729 | goto inc; | |
1730 | break; | |
1731 | ||
1732 | case TRACE_REG_UNREGISTER: | |
1733 | tracepoint_probe_unregister(call->tp, | |
1734 | call->class->probe, | |
1735 | data); | |
1736 | goto dec; | |
1737 | ||
3207d045 BB |
1738 | #ifdef CONFIG_PERF_EVENTS |
1739 | case TRACE_REG_PERF_REGISTER: | |
1740 | ret = tracepoint_probe_register(call->tp, | |
1741 | call->class->perf_probe, | |
1742 | data); | |
1743 | if (!ret) | |
1744 | goto inc; | |
1745 | break; | |
1746 | ||
1747 | case TRACE_REG_PERF_UNREGISTER: | |
1748 | tracepoint_probe_unregister(call->tp, | |
1749 | call->class->perf_probe, | |
1750 | data); | |
1751 | goto dec; | |
1752 | ||
1753 | case TRACE_REG_PERF_OPEN: | |
1754 | case TRACE_REG_PERF_CLOSE: | |
1755 | case TRACE_REG_PERF_ADD: | |
1756 | case TRACE_REG_PERF_DEL: | |
7f5a08c7 | 1757 | break; |
3207d045 | 1758 | #endif |
7f5a08c7 BB |
1759 | } |
1760 | ||
1761 | return ret; | |
1762 | inc: | |
f0dbf6fd | 1763 | user_event_get(user); |
72357590 | 1764 | update_enable_bit_for(user); |
7f5a08c7 BB |
1765 | return 0; |
1766 | dec: | |
72357590 | 1767 | update_enable_bit_for(user); |
f0dbf6fd | 1768 | user_event_put(user, true); |
7f5a08c7 BB |
1769 | return 0; |
1770 | } | |
1771 | ||
1772 | static int user_event_create(const char *raw_command) | |
1773 | { | |
e5d27181 | 1774 | struct user_event_group *group; |
7f5a08c7 BB |
1775 | struct user_event *user; |
1776 | char *name; | |
1777 | int ret; | |
1778 | ||
1779 | if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX)) | |
1780 | return -ECANCELED; | |
1781 | ||
1782 | raw_command += USER_EVENTS_PREFIX_LEN; | |
1783 | raw_command = skip_spaces(raw_command); | |
1784 | ||
f9cce238 | 1785 | name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT); |
7f5a08c7 BB |
1786 | |
1787 | if (!name) | |
1788 | return -ENOMEM; | |
1789 | ||
e5d27181 BB |
1790 | group = current_user_event_group(); |
1791 | ||
ccc6e590 XJ |
1792 | if (!group) { |
1793 | kfree(name); | |
e5d27181 | 1794 | return -ENOENT; |
ccc6e590 | 1795 | } |
e5d27181 BB |
1796 | |
1797 | mutex_lock(&group->reg_mutex); | |
7e348b32 | 1798 | |
a65442ed BB |
1799 | /* Dyn events persist, otherwise they would be cleaned up immediately */
1800 | ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST); | |
7e348b32 BB |
1801 | |
1802 | if (!ret) | |
f0dbf6fd | 1803 | user_event_put(user, false); |
7e348b32 | 1804 | |
e5d27181 | 1805 | mutex_unlock(&group->reg_mutex); |
7f5a08c7 BB |
1806 | |
1807 | if (ret) | |
1808 | kfree(name); | |
1809 | ||
1810 | return ret; | |
1811 | } | |
1812 | ||
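/*
 * Example (added illustration): this create callback backs the
 * dynamic_events interface, so an event can also be created from the
 * shell with the USER_EVENTS_PREFIX ("u:") command form, e.g.:
 *
 *	echo 'u:example u32 count' >> /sys/kernel/tracing/dynamic_events
 */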
1813 | static int user_event_show(struct seq_file *m, struct dyn_event *ev) | |
1814 | { | |
1815 | struct user_event *user = container_of(ev, struct user_event, devent); | |
a943188d | 1816 | struct ftrace_event_field *field; |
7f5a08c7 BB |
1817 | struct list_head *head; |
1818 | int depth = 0; | |
1819 | ||
1820 | seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user)); | |
1821 | ||
1822 | head = trace_get_fields(&user->call); | |
1823 | ||
a943188d | 1824 | list_for_each_entry_reverse(field, head, link) { |
7f5a08c7 BB |
1825 | if (depth == 0) |
1826 | seq_puts(m, " "); | |
1827 | else | |
1828 | seq_puts(m, "; "); | |
1829 | ||
1830 | seq_printf(m, "%s %s", field->type, field->name); | |
1831 | ||
1832 | if (str_has_prefix(field->type, "struct ")) | |
1833 | seq_printf(m, " %d", field->size); | |
1834 | ||
1835 | depth++; | |
1836 | } | |
1837 | ||
1838 | seq_puts(m, "\n"); | |
1839 | ||
1840 | return 0; | |
1841 | } | |
1842 | ||
1843 | static bool user_event_is_busy(struct dyn_event *ev) | |
1844 | { | |
1845 | struct user_event *user = container_of(ev, struct user_event, devent); | |
1846 | ||
d401b724 | 1847 | return !user_event_last_ref(user); |
7f5a08c7 BB |
1848 | } |
1849 | ||
1850 | static int user_event_free(struct dyn_event *ev) | |
1851 | { | |
1852 | struct user_event *user = container_of(ev, struct user_event, devent); | |
1853 | ||
d401b724 | 1854 | if (!user_event_last_ref(user)) |
7f5a08c7 BB |
1855 | return -EBUSY; |
1856 | ||
5dbd04ed BB |
1857 | if (!user_event_capable(user->reg_flags)) |
1858 | return -EPERM; | |
1859 | ||
7f5a08c7 BB |
1860 | return destroy_user_event(user); |
1861 | } | |
1862 | ||
9aed4e15 BB |
1863 | static bool user_field_match(struct ftrace_event_field *field, int argc, |
1864 | const char **argv, int *iout) | |
1865 | { | |
e6f89a14 | 1866 | char *field_name = NULL, *dyn_field_name = NULL; |
9aed4e15 | 1867 | bool colon = false, match = false; |
e6f89a14 | 1868 | int dyn_len, len; |
9aed4e15 | 1869 | |
e6f89a14 | 1870 | if (*iout >= argc) |
9aed4e15 BB |
1871 | return false; |
1872 | ||
e6f89a14 BB |
1873 | dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name, |
1874 | 0, &colon); | |
9aed4e15 | 1875 | |
e6f89a14 | 1876 | len = user_field_set_string(field, field_name, 0, colon); |
9aed4e15 | 1877 | |
e6f89a14 BB |
1878 | if (dyn_len != len) |
1879 | return false; | |
9aed4e15 | 1880 | |
e6f89a14 BB |
1881 | dyn_field_name = kmalloc(dyn_len, GFP_KERNEL); |
1882 | field_name = kmalloc(len, GFP_KERNEL); | |
9aed4e15 | 1883 | |
e6f89a14 BB |
1884 | if (!dyn_field_name || !field_name) |
1885 | goto out; | |
9aed4e15 | 1886 | |
e6f89a14 BB |
1887 | user_dyn_field_set_string(argc, argv, iout, dyn_field_name, |
1888 | dyn_len, &colon); | |
9aed4e15 | 1889 | |
e6f89a14 | 1890 | user_field_set_string(field, field_name, len, colon); |
9aed4e15 | 1891 | |
e6f89a14 | 1892 | match = strcmp(dyn_field_name, field_name) == 0; |
9aed4e15 | 1893 | out: |
e6f89a14 | 1894 | kfree(dyn_field_name); |
9aed4e15 BB |
1895 | kfree(field_name); |
1896 | ||
1897 | return match; | |
1898 | } | |
1899 | ||
1900 | static bool user_fields_match(struct user_event *user, int argc, | |
1901 | const char **argv) | |
1902 | { | |
a943188d | 1903 | struct ftrace_event_field *field; |
9aed4e15 BB |
1904 | struct list_head *head = &user->fields; |
1905 | int i = 0; | |
1906 | ||
1e953de9 BB |
1907 | if (argc == 0) |
1908 | return list_empty(head); | |
1909 | ||
a943188d | 1910 | list_for_each_entry_reverse(field, head, link) { |
9aed4e15 BB |
1911 | if (!user_field_match(field, argc, argv, &i)) |
1912 | return false; | |
a943188d | 1913 | } |
9aed4e15 BB |
1914 | |
1915 | if (i != argc) | |
1916 | return false; | |
1917 | ||
1918 | return true; | |
1919 | } | |
1920 | ||
7f5a08c7 BB |
1921 | static bool user_event_match(const char *system, const char *event, |
1922 | int argc, const char **argv, struct dyn_event *ev) | |
1923 | { | |
1924 | struct user_event *user = container_of(ev, struct user_event, devent); | |
9aed4e15 | 1925 | bool match; |
7f5a08c7 | 1926 | |
64805e40 BB |
1927 | match = strcmp(EVENT_NAME(user), event) == 0; |
1928 | ||
1929 | if (match && system) { | |
1930 | match = strcmp(system, user->group->system_name) == 0 || | |
1931 | strcmp(system, user->group->system_multi_name) == 0; | |
1932 | } | |
9aed4e15 | 1933 | |
1e953de9 | 1934 | if (match) |
9aed4e15 BB |
1935 | match = user_fields_match(user, argc, argv); |
1936 | ||
1937 | return match; | |
7f5a08c7 BB |
1938 | } |
1939 | ||
1940 | static struct dyn_event_operations user_event_dops = { | |
1941 | .create = user_event_create, | |
1942 | .show = user_event_show, | |
1943 | .is_busy = user_event_is_busy, | |
1944 | .free = user_event_free, | |
1945 | .match = user_event_match, | |
1946 | }; | |
1947 | ||
1948 | static int user_event_trace_register(struct user_event *user) | |
1949 | { | |
1950 | int ret; | |
1951 | ||
1952 | ret = register_trace_event(&user->call.event); | |
1953 | ||
1954 | if (!ret) | |
1955 | return -ENODEV; | |
1956 | ||
089331d4 | 1957 | ret = user_event_set_call_visible(user, true); |
7f5a08c7 BB |
1958 | |
1959 | if (ret) | |
1960 | unregister_trace_event(&user->call.event); | |
1961 | ||
1962 | return ret; | |
1963 | } | |
1964 | ||
64805e40 BB |
1965 | static int user_event_set_tp_name(struct user_event *user) |
1966 | { | |
1967 | lockdep_assert_held(&user->group->reg_mutex); | |
1968 | ||
1969 | if (EVENT_MULTI_FORMAT(user->reg_flags)) { | |
1970 | char *multi_name; | |
1971 | ||
1972 | multi_name = kasprintf(GFP_KERNEL_ACCOUNT, "%s.%llx", | |
1973 | user->reg_name, user->group->multi_id); | |
1974 | ||
1975 | if (!multi_name) | |
1976 | return -ENOMEM; | |
1977 | ||
1978 | user->call.name = multi_name; | |
1979 | user->tracepoint.name = multi_name; | |
1980 | ||
1981 | /* Inc to ensure unique multi-event name next time */ | |
1982 | user->group->multi_id++; | |
1983 | } else { | |
1984 | /* Non multi-format events use the register name */
1985 | user->call.name = user->reg_name; | |
1986 | user->tracepoint.name = user->reg_name; | |
1987 | } | |
1988 | ||
1989 | return 0; | |
1990 | } | |
1991 | ||
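/*
 * Worked example (added illustration): a multi-format event registered
 * as "mycpu" gets the tracepoint name "mycpu.0" for the first
 * instance, "mycpu.1" for the next, and so on (multi_id in hex),
 * while EVENT_NAME() keeps returning the plain register name "mycpu".
 */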
bd125a08 BB |
1992 | /* |
1993 | * Counts the ';' characters in args that lack a trailing space.
1994 | */ | |
1995 | static int count_semis_no_space(char *args) | |
1996 | { | |
1997 | int count = 0; | |
1998 | ||
1999 | while ((args = strchr(args, ';'))) { | |
2000 | args++; | |
2001 | ||
2002 | if (!isspace(*args)) | |
2003 | count++; | |
2004 | } | |
2005 | ||
2006 | return count; | |
2007 | } | |
2008 | ||
2009 | /* | |
2010 | * Copies the arguments while ensuring all ';' have a trailing space. | |
2011 | */ | |
2012 | static char *insert_space_after_semis(char *args, int count) | |
2013 | { | |
2014 | char *fixed, *pos; | |
2015 | int len; | |
2016 | ||
2017 | len = strlen(args) + count; | |
2018 | fixed = kmalloc(len + 1, GFP_KERNEL); | |
2019 | ||
2020 | if (!fixed) | |
2021 | return NULL; | |
2022 | ||
2023 | pos = fixed; | |
2024 | ||
2025 | /* Insert a space after ';' if there is no trailing space. */ | |
2026 | while (*args) { | |
2027 | *pos = *args++; | |
2028 | ||
2029 | if (*pos++ == ';' && !isspace(*args)) | |
2030 | *pos++ = ' '; | |
2031 | } | |
2032 | ||
2033 | *pos = '\0'; | |
2034 | ||
2035 | return fixed; | |
2036 | } | |
2037 | ||
2038 | static char **user_event_argv_split(char *args, int *argc) | |
2039 | { | |
2040 | char **split; | |
2041 | char *fixed; | |
2042 | int count; | |
2043 | ||
2044 | /* Count how many ';' without a trailing space */ | |
2045 | count = count_semis_no_space(args); | |
2046 | ||
2047 | /* No fixup is required */ | |
2048 | if (!count) | |
2049 | return argv_split(GFP_KERNEL, args, argc); | |
2050 | ||
2051 | /* We must fix up 'field;field' to 'field; field' */
2052 | fixed = insert_space_after_semis(args, count); | |
2053 | ||
2054 | if (!fixed) | |
2055 | return NULL; | |
2056 | ||
2057 | /* We do a normal split afterwards */ | |
2058 | split = argv_split(GFP_KERNEL, fixed, argc); | |
2059 | ||
2060 | /* We can free since argv_split makes a copy */ | |
2061 | kfree(fixed); | |
2062 | ||
2063 | return split; | |
2064 | } | |
2065 | ||
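/*
 * Example (added illustration, hypothetical helper name): args given
 * as "u32 a;u32 b" are normalized to "u32 a; u32 b" before splitting,
 * so both spellings produce the same four tokens:
 */
static void __maybe_unused user_event_argv_split_example(void)
{
	char args[] = "u32 a;u32 b";
	int argc = 0;
	char **argv = user_event_argv_split(args, &argc);

	/* argv is now { "u32", "a;", "u32", "b" } and argc == 4 */
	if (argv)
		argv_free(argv);
}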
7f5a08c7 BB |
2066 | /* |
2067 | * Parses the event name, arguments and flags then registers if successful. | |
2068 | * The name buffer lifetime is owned by this method for success cases only. | |
7e348b32 | 2069 | * Upon success the returned user_event has its ref count increased by 1. |
7f5a08c7 | 2070 | */ |
e5d27181 BB |
2071 | static int user_event_parse(struct user_event_group *group, char *name, |
2072 | char *args, char *flags, | |
b08d7258 | 2073 | struct user_event **newuser, int reg_flags) |
7f5a08c7 | 2074 | { |
7e348b32 | 2075 | struct user_event *user; |
1e953de9 | 2076 | char **argv = NULL; |
ba470eeb | 2077 | int argc = 0; |
1e953de9 BB |
2078 | int ret; |
2079 | u32 key; | |
7e348b32 | 2080 | |
5dbd04ed BB |
2081 | /* Currently don't support any text-based flags */
2082 | if (flags != NULL) | |
a65442ed BB |
2083 | return -EINVAL; |
2084 | ||
5dbd04ed BB |
2085 | if (!user_event_capable(reg_flags)) |
2086 | return -EPERM; | |
2087 | ||
1e953de9 | 2088 | if (args) { |
bd125a08 | 2089 | argv = user_event_argv_split(args, &argc); |
1e953de9 BB |
2090 | |
2091 | if (!argv) | |
2092 | return -ENOMEM; | |
2093 | } | |
2094 | ||
7e348b32 BB |
2095 | /* Prevent dyn_event from racing */ |
2096 | mutex_lock(&event_mutex); | |
1e953de9 BB |
2097 | user = find_user_event(group, name, argc, (const char **)argv, |
2098 | reg_flags, &key); | |
7e348b32 | 2099 | mutex_unlock(&event_mutex); |
7f5a08c7 | 2100 | |
1e953de9 BB |
2101 | if (argv) |
2102 | argv_free(argv); | |
ba470eeb | 2103 | |
1e953de9 BB |
2104 | if (IS_ERR(user)) |
2105 | return PTR_ERR(user); | |
2106 | ||
2107 | if (user) { | |
2108 | *newuser = user; | |
2109 | /* | |
2110 | * The name was allocated by the caller; free it here since the event
2111 | * already exists. The caller only frees the name on failure paths.
2112 | */ | |
2113 | kfree(name); | |
ba470eeb | 2114 | |
7f5a08c7 BB |
2115 | return 0; |
2116 | } | |
2117 | ||
f9cce238 | 2118 | user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT); |
7f5a08c7 BB |
2119 | |
2120 | if (!user) | |
2121 | return -ENOMEM; | |
2122 | ||
2123 | INIT_LIST_HEAD(&user->class.fields); | |
2124 | INIT_LIST_HEAD(&user->fields); | |
2467cda1 | 2125 | INIT_LIST_HEAD(&user->validators); |
7f5a08c7 | 2126 | |
e5d27181 | 2127 | user->group = group; |
64805e40 BB |
2128 | user->reg_name = name; |
2129 | user->reg_flags = reg_flags; | |
2130 | ||
2131 | ret = user_event_set_tp_name(user); | |
2132 | ||
2133 | if (ret) | |
2134 | goto put_user; | |
7f5a08c7 | 2135 | |
7f5a08c7 BB |
2136 | ret = user_event_parse_fields(user, args); |
2137 | ||
2138 | if (ret) | |
2139 | goto put_user; | |
2140 | ||
aa3b2b4c BB |
2141 | ret = user_event_create_print_fmt(user); |
2142 | ||
2143 | if (ret) | |
2144 | goto put_user; | |
7f5a08c7 BB |
2145 | |
2146 | user->call.data = user; | |
2147 | user->call.class = &user->class; | |
7f5a08c7 BB |
2148 | user->call.flags = TRACE_EVENT_FL_TRACEPOINT; |
2149 | user->call.tp = &user->tracepoint; | |
2150 | user->call.event.funcs = &user_event_funcs; | |
64805e40 BB |
2151 | |
2152 | if (EVENT_MULTI_FORMAT(user->reg_flags)) | |
2153 | user->class.system = group->system_multi_name; | |
2154 | else | |
2155 | user->class.system = group->system_name; | |
7f5a08c7 | 2156 | |
7f5a08c7 BB |
2157 | user->class.fields_array = user_event_fields_array; |
2158 | user->class.get_fields = user_event_get_fields; | |
2159 | user->class.reg = user_event_reg; | |
2160 | user->class.probe = user_event_ftrace; | |
3207d045 BB |
2161 | #ifdef CONFIG_PERF_EVENTS |
2162 | user->class.perf_probe = user_event_perf; | |
2163 | #endif | |
7f5a08c7 BB |
2164 | |
2165 | mutex_lock(&event_mutex); | |
efe34e99 | 2166 | |
ce58e96e BB |
2167 | if (current_user_events >= max_user_events) { |
2168 | ret = -EMFILE; | |
2169 | goto put_user_lock; | |
2170 | } | |
2171 | ||
7f5a08c7 | 2172 | ret = user_event_trace_register(user); |
7f5a08c7 BB |
2173 | |
2174 | if (ret) | |
efe34e99 | 2175 | goto put_user_lock; |
7f5a08c7 | 2176 | |
a65442ed BB |
2177 | if (user->reg_flags & USER_EVENT_REG_PERSIST) { |
2178 | /* Ensure we track self ref and caller ref (2) */ | |
2179 | refcount_set(&user->refcnt, 2); | |
2180 | } else { | |
2181 | /* Ensure we track only caller ref (1) */ | |
2182 | refcount_set(&user->refcnt, 1); | |
2183 | } | |
7e348b32 | 2184 | |
7f5a08c7 BB |
2185 | dyn_event_init(&user->devent, &user_event_dops); |
2186 | dyn_event_add(&user->devent, &user->call); | |
e5d27181 | 2187 | hash_add(group->register_table, &user->node, key); |
ce58e96e | 2188 | current_user_events++; |
7f5a08c7 | 2189 | |
efe34e99 BB |
2190 | mutex_unlock(&event_mutex); |
2191 | ||
7f5a08c7 BB |
2192 | *newuser = user; |
2193 | return 0; | |
efe34e99 BB |
2194 | put_user_lock: |
2195 | mutex_unlock(&event_mutex); | |
7f5a08c7 BB |
2196 | put_user: |
2197 | user_event_destroy_fields(user); | |
2467cda1 | 2198 | user_event_destroy_validators(user); |
4bded7af | 2199 | kfree(user->call.print_fmt); |
64805e40 BB |
2200 | |
2201 | /* Caller frees reg_name on error, but not multi-name */ | |
2202 | if (EVENT_NAME(user) != EVENT_TP_NAME(user)) | |
2203 | kfree(EVENT_TP_NAME(user)); | |
2204 | ||
7f5a08c7 BB |
2205 | kfree(user); |
2206 | return ret; | |
2207 | } | |
2208 | ||
2209 | /* | |
1e953de9 | 2210 | * Deletes previously created events if they are no longer being used. |
7f5a08c7 | 2211 | */ |
e5d27181 | 2212 | static int delete_user_event(struct user_event_group *group, char *name) |
7f5a08c7 | 2213 | { |
1e953de9 BB |
2214 | struct user_event *user; |
2215 | struct hlist_node *tmp; | |
2216 | u32 key = user_event_key(name); | |
2217 | int ret = -ENOENT; | |
7f5a08c7 | 2218 | |
1e953de9 BB |
2219 | /* Attempt to delete all event(s) with the name passed in */ |
2220 | hash_for_each_possible_safe(group->register_table, user, tmp, node, key) { | |
2221 | if (strcmp(EVENT_NAME(user), name)) | |
2222 | continue; | |
7f5a08c7 | 2223 | |
1e953de9 BB |
2224 | if (!user_event_last_ref(user)) |
2225 | return -EBUSY; | |
7e348b32 | 2226 | |
1e953de9 BB |
2227 | if (!user_event_capable(user->reg_flags)) |
2228 | return -EPERM; | |
7f5a08c7 | 2229 | |
1e953de9 | 2230 | ret = destroy_user_event(user); |
5dbd04ed | 2231 | |
1e953de9 BB |
2232 | if (ret) |
2233 | goto out; | |
2234 | } | |
2235 | out: | |
2236 | return ret; | |
7f5a08c7 BB |
2237 | } |
2238 | ||
2239 | /* | |
2240 | * Validates the user payload and writes via iterator. | |
2241 | */ | |
2242 | static ssize_t user_events_write_core(struct file *file, struct iov_iter *i) | |
2243 | { | |
e5d27181 | 2244 | struct user_event_file_info *info = file->private_data; |
7f5a08c7 BB |
2245 | struct user_event_refs *refs; |
2246 | struct user_event *user = NULL; | |
2247 | struct tracepoint *tp; | |
2248 | ssize_t ret = i->count; | |
2249 | int idx; | |
2250 | ||
2251 | if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx))) | |
2252 | return -EFAULT; | |
2253 | ||
cd98c932 BB |
2254 | if (idx < 0) |
2255 | return -EINVAL; | |
2256 | ||
7f5a08c7 BB |
2257 | rcu_read_lock_sched(); |
2258 | ||
e5d27181 | 2259 | refs = rcu_dereference_sched(info->refs); |
7f5a08c7 BB |
2260 | |
2261 | /* | |
2262 | * The refs->events array is protected by RCU, and new items may be | |
2263 | * added. But the user retrieved from indexing into the events array | |
2264 | * shall be immutable while the file is open.
2265 | */ | |
2266 | if (likely(refs && idx < refs->count)) | |
2267 | user = refs->events[idx]; | |
2268 | ||
2269 | rcu_read_unlock_sched(); | |
2270 | ||
2271 | if (unlikely(user == NULL)) | |
2272 | return -ENOENT; | |
2273 | ||
2467cda1 BB |
2274 | if (unlikely(i->count < user->min_size)) |
2275 | return -EINVAL; | |
2276 | ||
7f5a08c7 BB |
2277 | tp = &user->tracepoint; |
2278 | ||
2279 | /* | |
2280 | * It's possible key.enabled is disabled after this check; however,
2281 | * we don't mind if a few events are emitted during this window.
2282 | */ | |
2283 | if (likely(atomic_read(&tp->key.enabled) > 0)) { | |
2284 | struct tracepoint_func *probe_func_ptr; | |
2285 | user_event_func_t probe_func; | |
0279400a | 2286 | struct iov_iter copy; |
7f5a08c7 | 2287 | void *tpdata; |
2467cda1 | 2288 | bool faulted; |
7f5a08c7 | 2289 | |
0279400a BB |
2290 | if (unlikely(fault_in_iov_iter_readable(i, i->count))) |
2291 | return -EFAULT; | |
7f5a08c7 | 2292 | |
2467cda1 BB |
2293 | faulted = false; |
2294 | ||
7f5a08c7 BB |
2295 | rcu_read_lock_sched(); |
2296 | ||
2297 | probe_func_ptr = rcu_dereference_sched(tp->funcs); | |
2298 | ||
2299 | if (probe_func_ptr) { | |
2300 | do { | |
0279400a | 2301 | copy = *i; |
7f5a08c7 BB |
2302 | probe_func = probe_func_ptr->func; |
2303 | tpdata = probe_func_ptr->data; | |
2467cda1 | 2304 | probe_func(user, ©, tpdata, &faulted); |
7f5a08c7 BB |
2305 | } while ((++probe_func_ptr)->func); |
2306 | } | |
2307 | ||
2308 | rcu_read_unlock_sched(); | |
2467cda1 BB |
2309 | |
2310 | if (unlikely(faulted)) | |
2311 | return -EFAULT; | |
f6d026ee | 2312 | } else |
2313 | return -EBADF; | |
7f5a08c7 BB |
2314 | |
2315 | return ret; | |
2316 | } | |
2317 | ||
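/*
 * Example (added illustration, user-space side): each write must begin
 * with the u32 write_index returned by DIAG_IOCSREG, followed by the
 * raw event payload. 'reg', 'data_fd', 'enabled' and 'count' are
 * assumed from a prior registration (see the sketch after
 * user_events_ioctl_reg() below):
 *
 *	struct iovec io[2] = {
 *		{ &reg.write_index, sizeof(reg.write_index) },
 *		{ &count, sizeof(count) },
 *	};
 *
 *	if (enabled)
 *		writev(data_fd, io, 2);
 */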
e5d27181 BB |
2318 | static int user_events_open(struct inode *node, struct file *file) |
2319 | { | |
2320 | struct user_event_group *group; | |
2321 | struct user_event_file_info *info; | |
2322 | ||
2323 | group = current_user_event_group(); | |
2324 | ||
2325 | if (!group) | |
2326 | return -ENOENT; | |
2327 | ||
f9cce238 | 2328 | info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT); |
e5d27181 BB |
2329 | |
2330 | if (!info) | |
2331 | return -ENOMEM; | |
2332 | ||
2333 | info->group = group; | |
2334 | ||
2335 | file->private_data = info; | |
2336 | ||
2337 | return 0; | |
2338 | } | |
2339 | ||
7f5a08c7 BB |
2340 | static ssize_t user_events_write(struct file *file, const char __user *ubuf, |
2341 | size_t count, loff_t *ppos) | |
2342 | { | |
7f5a08c7 BB |
2343 | struct iov_iter i; |
2344 | ||
2345 | if (unlikely(*ppos != 0)) | |
2346 | return -EFAULT; | |
2347 | ||
9fd7874c | 2348 | if (unlikely(import_ubuf(ITER_SOURCE, (char __user *)ubuf, count, &i))) |
7f5a08c7 BB |
2349 | return -EFAULT; |
2350 | ||
2351 | return user_events_write_core(file, &i); | |
2352 | } | |
2353 | ||
2354 | static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i) | |
2355 | { | |
2356 | return user_events_write_core(kp->ki_filp, i); | |
2357 | } | |
2358 | ||
e5d27181 BB |
2359 | static int user_events_ref_add(struct user_event_file_info *info, |
2360 | struct user_event *user) | |
7f5a08c7 | 2361 | { |
e5d27181 | 2362 | struct user_event_group *group = info->group; |
7f5a08c7 BB |
2363 | struct user_event_refs *refs, *new_refs; |
2364 | int i, size, count = 0; | |
2365 | ||
e5d27181 BB |
2366 | refs = rcu_dereference_protected(info->refs, |
2367 | lockdep_is_held(&group->reg_mutex)); | |
7f5a08c7 BB |
2368 | |
2369 | if (refs) { | |
2370 | count = refs->count; | |
2371 | ||
2372 | for (i = 0; i < count; ++i) | |
2373 | if (refs->events[i] == user) | |
2374 | return i; | |
2375 | } | |
2376 | ||
2377 | size = struct_size(refs, events, count + 1); | |
2378 | ||
f9cce238 | 2379 | new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT); |
7f5a08c7 BB |
2380 | |
2381 | if (!new_refs) | |
2382 | return -ENOMEM; | |
2383 | ||
2384 | new_refs->count = count + 1; | |
2385 | ||
2386 | for (i = 0; i < count; ++i) | |
2387 | new_refs->events[i] = refs->events[i]; | |
2388 | ||
f0dbf6fd | 2389 | new_refs->events[i] = user_event_get(user); |
7f5a08c7 | 2390 | |
e5d27181 | 2391 | rcu_assign_pointer(info->refs, new_refs); |
7f5a08c7 BB |
2392 | |
2393 | if (refs) | |
2394 | kfree_rcu(refs, rcu); | |
2395 | ||
2396 | return i; | |
2397 | } | |
2398 | ||
2399 | static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg) | |
2400 | { | |
2401 | u32 size; | |
2402 | long ret; | |
2403 | ||
2404 | ret = get_user(size, &ureg->size); | |
2405 | ||
2406 | if (ret) | |
2407 | return ret; | |
2408 | ||
2409 | if (size > PAGE_SIZE) | |
2410 | return -E2BIG; | |
2411 | ||
39d6d08b BB |
2412 | if (size < offsetofend(struct user_reg, write_index)) |
2413 | return -EINVAL; | |
2414 | ||
2415 | ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size); | |
2416 | ||
2417 | if (ret) | |
2418 | return ret; | |
2419 | ||
a65442ed BB |
2420 | /* Ensure only valid flags */ |
2421 | if (kreg->flags & ~(USER_EVENT_REG_MAX-1)) | |
72357590 BB |
2422 | return -EINVAL; |
2423 | ||
2424 | /* Ensure supported size */ | |
2425 | switch (kreg->enable_size) { | |
2426 | case 4: | |
2427 | /* 32-bit */ | |
2428 | break; | |
2429 | #if BITS_PER_LONG >= 64 | |
2430 | case 8: | |
2431 | /* 64-bit */ | |
2432 | break; | |
2433 | #endif | |
2434 | default: | |
2435 | return -EINVAL; | |
2436 | } | |
2437 | ||
2438 | /* Ensure natural alignment */ | |
2439 | if (kreg->enable_addr % kreg->enable_size) | |
2440 | return -EINVAL; | |
2441 | ||
2442 | /* Ensure bit range for size */ | |
2443 | if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1) | |
2444 | return -EINVAL; | |
2445 | ||
2446 | /* Ensure accessible */ | |
2447 | if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr, | |
2448 | kreg->enable_size)) | |
2449 | return -EFAULT; | |
2450 | ||
39d6d08b BB |
2451 | kreg->size = size; |
2452 | ||
2453 | return 0; | |
7f5a08c7 BB |
2454 | } |
2455 | ||
2456 | /* | |
2457 | * Registers a user_event on behalf of a user process. | |
2458 | */ | |
e5d27181 BB |
2459 | static long user_events_ioctl_reg(struct user_event_file_info *info, |
2460 | unsigned long uarg) | |
7f5a08c7 BB |
2461 | { |
2462 | struct user_reg __user *ureg = (struct user_reg __user *)uarg; | |
2463 | struct user_reg reg; | |
2464 | struct user_event *user; | |
72357590 | 2465 | struct user_event_enabler *enabler; |
7f5a08c7 BB |
2466 | char *name; |
2467 | long ret; | |
72357590 | 2468 | int write_result; |
7f5a08c7 BB |
2469 | |
2470 | ret = user_reg_get(ureg, ®); | |
2471 | ||
2472 | if (ret) | |
2473 | return ret; | |
2474 | ||
97bbce89 BB |
2475 | /* |
2476 | * Prevent users from using the same address and bit multiple times | |
2477 | * within the same mm address space. This can cause unexpected behavior | |
2478 | * for user processes, which is far easier to debug if this is explicitly
2479 | * an error upon registering. | |
2480 | */ | |
2481 | if (current_user_event_enabler_exists((unsigned long)reg.enable_addr, | |
2482 | reg.enable_bit)) | |
2483 | return -EADDRINUSE; | |
2484 | ||
7f5a08c7 BB |
2485 | name = strndup_user((const char __user *)(uintptr_t)reg.name_args, |
2486 | MAX_EVENT_DESC); | |
2487 | ||
2488 | if (IS_ERR(name)) { | |
2489 | ret = PTR_ERR(name); | |
2490 | return ret; | |
2491 | } | |
2492 | ||
b08d7258 | 2493 | ret = user_event_parse_cmd(info->group, name, &user, reg.flags); |
7f5a08c7 BB |
2494 | |
2495 | if (ret) { | |
2496 | kfree(name); | |
2497 | return ret; | |
2498 | } | |
2499 | ||
e5d27181 | 2500 | ret = user_events_ref_add(info, user); |
7f5a08c7 | 2501 | |
7e348b32 | 2502 | /* No longer need parse ref, ref_add either worked or not */ |
f0dbf6fd | 2503 | user_event_put(user, false); |
7e348b32 | 2504 | |
7f5a08c7 BB |
2505 | /* Positive number is index and valid */ |
2506 | if (ret < 0) | |
2507 | return ret; | |
2508 | ||
72357590 BB |
2509 | /* |
2510 | * user_events_ref_add succeeded: | |
2511 | * At this point we have a user_event; its lifetime is bound by the
2512 | * reference count, not this file. If anything fails, the user_event | |
2513 | * still has a reference until the file is released. During release | |
2514 | * any remaining references (from user_events_ref_add) are decremented. | |
2515 | * | |
2516 | * Attempt to create an enabler, whose lifetime is tied to the event in
2517 | * the same way. Once the task that caused the enabler to be
2518 | * created exits or issues exec() then the enablers it has created | |
2519 | * will be destroyed and the ref to the event will be decremented. | |
2520 | */ | |
2521 | enabler = user_event_enabler_create(®, user, &write_result); | |
2522 | ||
2523 | if (!enabler) | |
2524 | return -ENOMEM; | |
2525 | ||
2526 | /* Write failed/faulted, give error back to caller */ | |
2527 | if (write_result) | |
2528 | return write_result; | |
2529 | ||
7f5a08c7 | 2530 | put_user((u32)ret, &ureg->write_index); |
7f5a08c7 BB |
2531 | |
2532 | return 0; | |
2533 | } | |
2534 | ||
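/*
 * Example (added sketch of the user-space side, assuming the uapi
 * definitions in <linux/user_events.h>; the names and event format
 * here are illustrative only, mirroring
 * Documentation/trace/user_events.rst):
 *
 *	static __u32 enabled;
 *
 *	struct user_reg reg = {
 *		.size = sizeof(reg),
 *		.enable_bit = 0,
 *		.enable_size = sizeof(enabled),
 *		.enable_addr = (__u64)(uintptr_t)&enabled,
 *		.name_args = (__u64)(uintptr_t)"example u32 count",
 *	};
 *
 *	int data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *
 *	ioctl(data_fd, DIAG_IOCSREG, &reg);
 *
 * On success, reg.write_index holds the index to place at the start
 * of each write, and the kernel sets/clears bit 0 of 'enabled' as
 * tracers attach and detach.
 */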
2535 | /* | |
2536 | * Deletes a user_event on behalf of a user process. | |
2537 | */ | |
e5d27181 BB |
2538 | static long user_events_ioctl_del(struct user_event_file_info *info, |
2539 | unsigned long uarg) | |
7f5a08c7 BB |
2540 | { |
2541 | void __user *ubuf = (void __user *)uarg; | |
2542 | char *name; | |
2543 | long ret; | |
2544 | ||
2545 | name = strndup_user(ubuf, MAX_EVENT_DESC); | |
2546 | ||
2547 | if (IS_ERR(name)) | |
2548 | return PTR_ERR(name); | |
2549 | ||
7e348b32 BB |
2550 | /* event_mutex prevents dyn_event from racing */ |
2551 | mutex_lock(&event_mutex); | |
e5d27181 | 2552 | ret = delete_user_event(info->group, name); |
7e348b32 | 2553 | mutex_unlock(&event_mutex); |
7f5a08c7 BB |
2554 | |
2555 | kfree(name); | |
2556 | ||
2557 | return ret; | |
2558 | } | |
2559 | ||
dcb8177c BB |
2560 | static long user_unreg_get(struct user_unreg __user *ureg, |
2561 | struct user_unreg *kreg) | |
2562 | { | |
2563 | u32 size; | |
2564 | long ret; | |
2565 | ||
2566 | ret = get_user(size, &ureg->size); | |
2567 | ||
2568 | if (ret) | |
2569 | return ret; | |
2570 | ||
2571 | if (size > PAGE_SIZE) | |
2572 | return -E2BIG; | |
2573 | ||
2574 | if (size < offsetofend(struct user_unreg, disable_addr)) | |
2575 | return -EINVAL; | |
2576 | ||
2577 | ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size); | |
2578 | ||
2579 | /* Ensure no reserved values, since we don't support any yet */ | |
2580 | if (kreg->__reserved || kreg->__reserved2) | |
2581 | return -EINVAL; | |
2582 | ||
2583 | return ret; | |
2584 | } | |
2585 | ||
17b439db | 2586 | static int user_event_mm_clear_bit(struct user_event_mm *user_mm, |
2de9ee94 BB |
2587 | unsigned long uaddr, unsigned char bit, |
2588 | unsigned long flags) | |
17b439db BB |
2589 | { |
2590 | struct user_event_enabler enabler; | |
2591 | int result; | |
41d8fba1 | 2592 | int attempt = 0; |
17b439db BB |
2593 | |
2594 | memset(&enabler, 0, sizeof(enabler)); | |
2595 | enabler.addr = uaddr; | |
2de9ee94 | 2596 | enabler.values = bit | flags; |
17b439db BB |
2597 | retry: |
2598 | /* Prevents state changes from racing with new enablers */ | |
2599 | mutex_lock(&event_mutex); | |
2600 | ||
2601 | /* Force the bit to be cleared, since no event is attached */ | |
2602 | mmap_read_lock(user_mm->mm); | |
41d8fba1 | 2603 | result = user_event_enabler_write(user_mm, &enabler, false, &attempt); |
17b439db BB |
2604 | mmap_read_unlock(user_mm->mm); |
2605 | ||
2606 | mutex_unlock(&event_mutex); | |
2607 | ||
2608 | if (result) { | |
2609 | /* Attempt to fault-in and retry if it worked */ | |
41d8fba1 | 2610 | if (!user_event_mm_fault_in(user_mm, uaddr, attempt)) |
17b439db BB |
2611 | goto retry; |
2612 | } | |
2613 | ||
2614 | return result; | |
2615 | } | |
2616 | ||
dcb8177c BB |
2617 | /* |
2618 | * Unregisters an enablement address/bit within a task/user mm. | |
2619 | */ | |
2620 | static long user_events_ioctl_unreg(unsigned long uarg) | |
2621 | { | |
2622 | struct user_unreg __user *ureg = (struct user_unreg __user *)uarg; | |
2623 | struct user_event_mm *mm = current->user_event_mm; | |
2624 | struct user_event_enabler *enabler, *next; | |
2625 | struct user_unreg reg; | |
2de9ee94 | 2626 | unsigned long flags; |
dcb8177c BB |
2627 | long ret; |
2628 | ||
2629 | ret = user_unreg_get(ureg, ®); | |
2630 | ||
2631 | if (ret) | |
2632 | return ret; | |
2633 | ||
2634 | if (!mm) | |
2635 | return -ENOENT; | |
2636 | ||
2de9ee94 | 2637 | flags = 0; |
dcb8177c BB |
2638 | ret = -ENOENT; |
2639 | ||
2640 | /* | |
2641 | * The freeing and faulting flags indicate whether the enabler is in
2642 | * use at all. When faulting is set, a page fault is being handled
2643 | * asynchronously, and that path destroys the enabler if freeing is set.
2644 | * If no async fault is happening, we can destroy it now since we hold | |
2645 | * the event_mutex during these checks. | |
2646 | */ | |
2647 | mutex_lock(&event_mutex); | |
2648 | ||
dcbd1ac2 | 2649 | list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) { |
dcb8177c | 2650 | if (enabler->addr == reg.disable_addr && |
ee7751b5 | 2651 | ENABLE_BIT(enabler) == reg.disable_bit) { |
dcb8177c BB |
2652 | set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler)); |
2653 | ||
2de9ee94 BB |
2654 | /* We must keep compat flags for the clear */ |
2655 | flags |= enabler->values & ENABLE_VAL_COMPAT_MASK; | |
2656 | ||
dcb8177c | 2657 | if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler))) |
f0dbf6fd | 2658 | user_event_enabler_destroy(enabler, true); |
dcb8177c BB |
2659 | |
2660 | /* Removed at least one */ | |
2661 | ret = 0; | |
2662 | } | |
dcbd1ac2 | 2663 | } |
dcb8177c BB |
2664 | |
2665 | mutex_unlock(&event_mutex); | |
2666 | ||
17b439db BB |
2667 | /* Ensure bit is now cleared for user, regardless of event status */ |
2668 | if (!ret) | |
2669 | ret = user_event_mm_clear_bit(mm, reg.disable_addr, | |
2de9ee94 | 2670 | reg.disable_bit, flags); |
17b439db | 2671 | |
dcb8177c BB |
2672 | return ret; |
2673 | } | |
2674 | ||
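/*
 * Example (added illustration, user-space side, continuing the sketch
 * after user_events_ioctl_reg() above): dropping the enabler for the
 * same address and bit:
 *
 *	struct user_unreg unreg = {
 *		.size = sizeof(unreg),
 *		.disable_bit = 0,
 *		.disable_addr = (__u64)(uintptr_t)&enabled,
 *	};
 *
 *	ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
 */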
7f5a08c7 BB |
2675 | /* |
2676 | * Handles the ioctl from user mode to register or alter operations. | |
2677 | */ | |
2678 | static long user_events_ioctl(struct file *file, unsigned int cmd, | |
2679 | unsigned long uarg) | |
2680 | { | |
e5d27181 BB |
2681 | struct user_event_file_info *info = file->private_data; |
2682 | struct user_event_group *group = info->group; | |
7f5a08c7 BB |
2683 | long ret = -ENOTTY; |
2684 | ||
2685 | switch (cmd) { | |
2686 | case DIAG_IOCSREG: | |
e5d27181 BB |
2687 | mutex_lock(&group->reg_mutex); |
2688 | ret = user_events_ioctl_reg(info, uarg); | |
2689 | mutex_unlock(&group->reg_mutex); | |
7f5a08c7 BB |
2690 | break; |
2691 | ||
2692 | case DIAG_IOCSDEL: | |
e5d27181 BB |
2693 | mutex_lock(&group->reg_mutex); |
2694 | ret = user_events_ioctl_del(info, uarg); | |
2695 | mutex_unlock(&group->reg_mutex); | |
7f5a08c7 | 2696 | break; |
dcb8177c BB |
2697 | |
2698 | case DIAG_IOCSUNREG: | |
2699 | mutex_lock(&group->reg_mutex); | |
2700 | ret = user_events_ioctl_unreg(uarg); | |
2701 | mutex_unlock(&group->reg_mutex); | |
2702 | break; | |
7f5a08c7 BB |
2703 | } |
2704 | ||
2705 | return ret; | |
2706 | } | |
2707 | ||
2708 | /* | |
2709 | * Handles the final close of the file from user mode. | |
2710 | */ | |
2711 | static int user_events_release(struct inode *node, struct file *file) | |
2712 | { | |
e5d27181 BB |
2713 | struct user_event_file_info *info = file->private_data; |
2714 | struct user_event_group *group; | |
7f5a08c7 | 2715 | struct user_event_refs *refs; |
7f5a08c7 BB |
2716 | int i; |
2717 | ||
e5d27181 BB |
2718 | if (!info) |
2719 | return -EINVAL; | |
2720 | ||
2721 | group = info->group; | |
2722 | ||
7f5a08c7 BB |
2723 | /* |
2724 | * Ensure refs cannot change under any situation by taking the | |
2725 | * register mutex during the final freeing of the references. | |
2726 | */ | |
e5d27181 | 2727 | mutex_lock(&group->reg_mutex); |
7f5a08c7 | 2728 | |
e5d27181 | 2729 | refs = info->refs; |
7f5a08c7 BB |
2730 | |
2731 | if (!refs) | |
2732 | goto out; | |
2733 | ||
2734 | /* | |
2735 | * The lifetime of refs has reached an end, it's tied to this file. | |
2736 | * The underlying user_events are ref counted, and cannot be freed. | |
2737 | * After this decrement, the user_events may be freed elsewhere. | |
2738 | */ | |
f0dbf6fd BB |
2739 | for (i = 0; i < refs->count; ++i) |
2740 | user_event_put(refs->events[i], false); | |
7f5a08c7 | 2741 | |
7f5a08c7 BB |
2742 | out: |
2743 | file->private_data = NULL; | |
2744 | ||
e5d27181 | 2745 | mutex_unlock(&group->reg_mutex); |
7f5a08c7 BB |
2746 | |
2747 | kfree(refs); | |
e5d27181 | 2748 | kfree(info); |
7f5a08c7 BB |
2749 | |
2750 | return 0; | |
2751 | } | |
2752 | ||
2753 | static const struct file_operations user_data_fops = { | |
a4c40c13 BB |
2754 | .open = user_events_open, |
2755 | .write = user_events_write, | |
2756 | .write_iter = user_events_write_iter, | |
7f5a08c7 | 2757 | .unlocked_ioctl = user_events_ioctl, |
a4c40c13 | 2758 | .release = user_events_release, |
7f5a08c7 BB |
2759 | }; |
2760 | ||
7f5a08c7 BB |
2761 | static void *user_seq_start(struct seq_file *m, loff_t *pos) |
2762 | { | |
2763 | if (*pos) | |
2764 | return NULL; | |
2765 | ||
2766 | return (void *)1; | |
2767 | } | |
2768 | ||
2769 | static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos) | |
2770 | { | |
2771 | ++*pos; | |
2772 | return NULL; | |
2773 | } | |
2774 | ||
2775 | static void user_seq_stop(struct seq_file *m, void *p) | |
2776 | { | |
2777 | } | |
2778 | ||
2779 | static int user_seq_show(struct seq_file *m, void *p) | |
2780 | { | |
e5d27181 | 2781 | struct user_event_group *group = m->private; |
7f5a08c7 BB |
2782 | struct user_event *user; |
2783 | char status; | |
72357590 | 2784 | int i, active = 0, busy = 0; |
7f5a08c7 | 2785 | |
e5d27181 BB |
2786 | if (!group) |
2787 | return -EINVAL; | |
2788 | ||
2789 | mutex_lock(&group->reg_mutex); | |
7f5a08c7 | 2790 | |
e5d27181 | 2791 | hash_for_each(group->register_table, i, user, node) { |
39d6d08b | 2792 | status = user->status; |
7f5a08c7 | 2793 | |
64805e40 | 2794 | seq_printf(m, "%s", EVENT_TP_NAME(user)); |
7f5a08c7 | 2795 | |
72357590 | 2796 | if (status != 0) |
7f5a08c7 BB |
2797 | seq_puts(m, " #"); |
2798 | ||
2799 | if (status != 0) { | |
2800 | seq_puts(m, " Used by"); | |
2801 | if (status & EVENT_STATUS_FTRACE) | |
2802 | seq_puts(m, " ftrace"); | |
2803 | if (status & EVENT_STATUS_PERF) | |
2804 | seq_puts(m, " perf"); | |
2805 | if (status & EVENT_STATUS_OTHER) | |
2806 | seq_puts(m, " other"); | |
2807 | busy++; | |
2808 | } | |
2809 | ||
7f5a08c7 BB |
2810 | seq_puts(m, "\n"); |
2811 | active++; | |
2812 | } | |
2813 | ||
e5d27181 | 2814 | mutex_unlock(&group->reg_mutex); |
7f5a08c7 BB |
2815 | |
2816 | seq_puts(m, "\n"); | |
2817 | seq_printf(m, "Active: %d\n", active); | |
2818 | seq_printf(m, "Busy: %d\n", busy); | |
7f5a08c7 BB |
2819 | |
2820 | return 0; | |
2821 | } | |
2822 | ||
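/*
 * Example (added illustration): reading user_events_status with a
 * single event attached to ftrace produces output like:
 *
 *	example # Used by ftrace
 *
 *	Active: 1
 *	Busy: 1
 */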
2823 | static const struct seq_operations user_seq_ops = { | |
a4c40c13 BB |
2824 | .start = user_seq_start, |
2825 | .next = user_seq_next, | |
2826 | .stop = user_seq_stop, | |
2827 | .show = user_seq_show, | |
7f5a08c7 BB |
2828 | }; |
2829 | ||
2830 | static int user_status_open(struct inode *node, struct file *file) | |
2831 | { | |
e5d27181 BB |
2832 | struct user_event_group *group; |
2833 | int ret; | |
2834 | ||
2835 | group = current_user_event_group(); | |
2836 | ||
2837 | if (!group) | |
2838 | return -ENOENT; | |
2839 | ||
2840 | ret = seq_open(file, &user_seq_ops); | |
2841 | ||
2842 | if (!ret) { | |
2843 | /* Chain group to seq_file */ | |
2844 | struct seq_file *m = file->private_data; | |
2845 | ||
2846 | m->private = group; | |
2847 | } | |
2848 | ||
2849 | return ret; | |
7f5a08c7 BB |
2850 | } |
2851 | ||
2852 | static const struct file_operations user_status_fops = { | |
a4c40c13 BB |
2853 | .open = user_status_open, |
2854 | .read = seq_read, | |
2855 | .llseek = seq_lseek, | |
2856 | .release = seq_release, | |
7f5a08c7 BB |
2857 | }; |
2858 | ||
2859 | /* | |
2860 | * Creates a set of tracefs files to allow user mode interactions. | |
2861 | */ | |
2862 | static int create_user_tracefs(void) | |
2863 | { | |
2864 | struct dentry *edata, *emmap; | |
2865 | ||
2866 | edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE, | |
2867 | NULL, NULL, &user_data_fops); | |
2868 | ||
2869 | if (!edata) { | |
2870 | pr_warn("Could not create tracefs 'user_events_data' entry\n"); | |
2871 | goto err; | |
2872 | } | |
2873 | ||
72357590 | 2874 | emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ, |
7f5a08c7 BB |
2875 | NULL, NULL, &user_status_fops); |
2876 | ||
2877 | if (!emmap) { | |
2878 | tracefs_remove(edata); | |
2879 | pr_warn("Could not create tracefs 'user_events_status' entry\n");
2880 | goto err; | |
2881 | } | |
2882 | ||
2883 | return 0; | |
2884 | err: | |
2885 | return -ENODEV; | |
2886 | } | |
2887 | ||
ce58e96e BB |
2888 | static int set_max_user_events_sysctl(struct ctl_table *table, int write, |
2889 | void *buffer, size_t *lenp, loff_t *ppos) | |
2890 | { | |
2891 | int ret; | |
2892 | ||
2893 | mutex_lock(&event_mutex); | |
2894 | ||
2895 | ret = proc_douintvec(table, write, buffer, lenp, ppos); | |
2896 | ||
2897 | mutex_unlock(&event_mutex); | |
2898 | ||
2899 | return ret; | |
2900 | } | |
2901 | ||
2902 | static struct ctl_table user_event_sysctls[] = { | |
2903 | { | |
2904 | .procname = "user_events_max", | |
2905 | .data = &max_user_events, | |
2906 | .maxlen = sizeof(unsigned int), | |
2907 | .mode = 0644, | |
2908 | .proc_handler = set_max_user_events_sysctl, | |
2909 | }, | |
2910 | {} | |
2911 | }; | |
2912 | ||
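/*
 * Example (added illustration): the limit is exposed as a sysctl and
 * can be raised at runtime, e.g.:
 *
 *	echo 65536 > /proc/sys/kernel/user_events_max
 */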
7f5a08c7 BB |
2913 | static int __init trace_events_user_init(void) |
2914 | { | |
2915 | int ret; | |
2916 | ||
81f8fb65 BB |
2917 | fault_cache = KMEM_CACHE(user_event_enabler_fault, 0); |
2918 | ||
2919 | if (!fault_cache) | |
2920 | return -ENOMEM; | |
2921 | ||
ed0e0ae0 | 2922 | init_group = user_event_group_create(); |
7f5a08c7 | 2923 | |
81f8fb65 BB |
2924 | if (!init_group) { |
2925 | kmem_cache_destroy(fault_cache); | |
7f5a08c7 | 2926 | return -ENOMEM; |
81f8fb65 | 2927 | } |
7f5a08c7 BB |
2928 | |
2929 | ret = create_user_tracefs(); | |
2930 | ||
2931 | if (ret) { | |
2932 | pr_warn("user_events could not register with tracefs\n"); | |
e5d27181 | 2933 | user_event_group_destroy(init_group); |
81f8fb65 | 2934 | kmem_cache_destroy(fault_cache); |
e5d27181 | 2935 | init_group = NULL; |
7f5a08c7 BB |
2936 | return ret; |
2937 | } | |
2938 | ||
2939 | if (dyn_event_register(&user_event_dops)) | |
2940 | pr_warn("user_events could not register with dyn_events\n"); | |
2941 | ||
ce58e96e BB |
2942 | register_sysctl_init("kernel", user_event_sysctls); |
2943 | ||
7f5a08c7 BB |
2944 | return 0; |
2945 | } | |
2946 | ||
2947 | fs_initcall(trace_events_user_init); |