1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2021, Microsoft Corporation.
6 * Beau Belgrave <beaub@linux.microsoft.com>
9 #include <linux/bitmap.h>
10 #include <linux/cdev.h>
11 #include <linux/hashtable.h>
12 #include <linux/list.h>
14 #include <linux/uio.h>
15 #include <linux/ioctl.h>
16 #include <linux/jhash.h>
17 #include <linux/refcount.h>
18 #include <linux/trace_events.h>
19 #include <linux/tracefs.h>
20 #include <linux/types.h>
21 #include <linux/uaccess.h>
22 #include <linux/highmem.h>
23 #include <linux/init.h>
24 #include <linux/user_events.h>
25 #include "trace_dynevent.h"
26 #include "trace_output.h"
29 #define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)
31 #define FIELD_DEPTH_TYPE 0
32 #define FIELD_DEPTH_NAME 1
33 #define FIELD_DEPTH_SIZE 2
35 /* Limit how long an event name plus args can be within the subsystem. */
36 #define MAX_EVENT_DESC 512
37 #define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
38 #define MAX_FIELD_ARRAY_SIZE 1024
41 * Internal bits (kernel side only) to keep track of connected probes:
42 * These are used when status is requested in text form about an event. These
43 * bits are compared against an internal byte on the event to determine which
44 * probes to print out to the user.
46 * These do not reflect the mapped bytes between the user and kernel space.
48 #define EVENT_STATUS_FTRACE BIT(0)
49 #define EVENT_STATUS_PERF BIT(1)
50 #define EVENT_STATUS_OTHER BIT(7)
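/*
 * Illustrative note (not part of the original file): an event currently
 * attached to both an ftrace probe and a perf probe would carry
 * status = EVENT_STATUS_FTRACE | EVENT_STATUS_PERF, and the status file
 * below would report it roughly as "<name> ... Used by ftrace perf".
 */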
53 * User register flags are not allowed yet; keep them here until we are
54 * ready to expose them to the user ABI.
57 /* Event will not delete upon last reference closing */
58 USER_EVENT_REG_PERSIST = 1U << 0,
60 /* This value or above is currently non-ABI */
61 USER_EVENT_REG_MAX = 1U << 1,
65 * Stores the system name, tables, and locks for a group of events. This
66 * allows isolation for events by various means.
68 struct user_event_group {
70 struct hlist_node node;
71 struct mutex reg_mutex;
72 DECLARE_HASHTABLE(register_table, 8);
75 /* Group for init_user_ns mapping, top-most group */
76 static struct user_event_group *init_group;
78 /* Max allowed events for the whole system */
79 static unsigned int max_user_events = 32768;
81 /* Current number of events on the whole system */
82 static unsigned int current_user_events;
85 * Stores per-event properties. As users register events
86 * within a file, a user_event might be created if it does not
87 * already exist. These are globally used and their lifetime
88 * is tied to the refcnt member. These cannot go away until the
92 struct user_event_group *group;
93 struct tracepoint tracepoint;
94 struct trace_event_call call;
95 struct trace_event_class class;
96 struct dyn_event devent;
97 struct hlist_node node;
98 struct list_head fields;
99 struct list_head validators;
100 struct work_struct put_work;
108 * Stores per-mm/event properties that enable an address to be
109 * updated properly for each task. As tasks are forked, we use
110 * these to track enablement sites that are tied to an event.
112 struct user_event_enabler {
113 struct list_head mm_enablers_link;
114 struct user_event *event;
117 /* Track enable bit, flags, etc. Aligned for bitops. */
118 unsigned long values;
121 /* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
122 #define ENABLE_VAL_BIT_MASK 0x3F
124 /* Bit 6 is for faulting status of enablement */
125 #define ENABLE_VAL_FAULTING_BIT 6
127 /* Bit 7 is for freeing status of enablement */
128 #define ENABLE_VAL_FREEING_BIT 7
130 /* Only duplicate the bit value */
131 #define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK
133 #define ENABLE_BITOPS(e) (&(e)->values)
135 #define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
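/*
 * Illustrative sketch of the values layout (not part of the original file):
 * an enabler registered for bit index 3 that currently has an async fault
 * outstanding would have, in its low byte:
 *
 *	bit 7 (FREEING) = 0, bit 6 (FAULTING) = 1, bits 0-5 (bit index) = 3
 *
 * i.e. (values & 0xff) == 0x43. ENABLE_BIT() extracts the low 6 bits
 * (3 here), while the FAULTING and FREEING flags are tested and set via
 * ENABLE_BITOPS() with the regular bitops helpers.
 */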
137 /* Used for asynchronous faulting in of pages */
138 struct user_event_enabler_fault {
139 struct work_struct work;
140 struct user_event_mm *mm;
141 struct user_event_enabler *enabler;
145 static struct kmem_cache *fault_cache;
147 /* Global list of memory descriptors using user_events */
148 static LIST_HEAD(user_event_mms);
149 static DEFINE_SPINLOCK(user_event_mms_lock);
152 * Stores per-file event references. As users register events
153 * within a file, this structure is modified and freed via RCU.
154 * The lifetime of this struct is tied to the lifetime of the file.
155 * These are not shared and only accessible by the file that created it.
157 struct user_event_refs {
160 struct user_event *events[];
163 struct user_event_file_info {
164 struct user_event_group *group;
165 struct user_event_refs *refs;
168 #define VALIDATOR_ENSURE_NULL (1 << 0)
169 #define VALIDATOR_REL (1 << 1)
171 struct user_event_validator {
172 struct list_head user_event_link;
177 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
178 void *tpdata, bool *faulted);
180 static int user_event_parse(struct user_event_group *group, char *name,
181 char *args, char *flags,
182 struct user_event **newuser, int reg_flags);
184 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
185 static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
186 static void user_event_mm_put(struct user_event_mm *mm);
187 static int destroy_user_event(struct user_event *user);
189 static u32 user_event_key(char *name)
191 return jhash(name, strlen(name), 0);
194 static struct user_event *user_event_get(struct user_event *user)
196 refcount_inc(&user->refcnt);
201 static void delayed_destroy_user_event(struct work_struct *work)
203 struct user_event *user = container_of(
204 work, struct user_event, put_work);
206 mutex_lock(&event_mutex);
208 if (!refcount_dec_and_test(&user->refcnt))
211 if (destroy_user_event(user)) {
213 * The only reason this would fail here is if we cannot
214 * update the visibility of the event. In this case the
215 * event stays in the hashtable, waiting for someone to
216 * attempt to delete it later.
218 pr_warn("user_events: Unable to delete event\n");
219 refcount_set(&user->refcnt, 1);
222 mutex_unlock(&event_mutex);
225 static void user_event_put(struct user_event *user, bool locked)
233 * When the event is not enabled for auto-delete there will always
234 * be at least 1 reference to the event. During the event creation
235 * we initially set the refcnt to 2 to achieve this. In those cases
236 * the caller must acquire event_mutex and after decrement check if
237 * the refcnt is 1, meaning this is the last reference. When auto
238 * delete is enabled, there will only be 1 ref, i.e. refcnt will be
239 * only set to 1 during creation to allow the below checks to go
240 * through upon the last put. The last put must always be done with
241 * the event mutex held.
244 lockdep_assert_not_held(&event_mutex);
245 delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
247 lockdep_assert_held(&event_mutex);
248 delete = refcount_dec_and_test(&user->refcnt);
255 * We now have the event_mutex in all cases, which ensures that
256 * no new references will be taken until event_mutex is released.
257 * New references come through find_user_event(), which requires
258 * the event_mutex to be held.
261 if (user->reg_flags & USER_EVENT_REG_PERSIST) {
262 /* We should not get here when persist flag is set */
263 pr_alert("BUG: Auto-delete engaged on persistent event\n");
268 * Unfortunately we have to attempt the actual destroy in a work
269 * queue. This is because not all cases handle a trace_event_call
270 * being removed within the class->reg() operation for unregister.
272 INIT_WORK(&user->put_work, delayed_destroy_user_event);
275 * Since the event is still in the hashtable, we have to re-inc
276 * the ref count to 1. This count will be decremented and checked
277 * in the work queue to ensure it's still the last ref. This is
278 * needed because a user-process could register the same event in
279 * between the time of event_mutex release and the work queue
280 * running the delayed destroy. If we removed the item now from
281 * the hashtable, this would result in a timing window where a
282 * user process would fail a register because the trace_event_call
283 * register would fail in the tracing layers.
285 refcount_set(&user->refcnt, 1);
287 if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
289 * If we fail we must wait for an admin to attempt delete or
290 * another register/close of the event, whichever is first.
292 pr_warn("user_events: Unable to queue delayed destroy\n");
295 /* If we did not hold event_mutex on entry, we must unlock it here */
297 mutex_unlock(&event_mutex);
300 static void user_event_group_destroy(struct user_event_group *group)
302 kfree(group->system_name);
306 static char *user_event_group_system_name(void)
309 int len = sizeof(USER_EVENTS_SYSTEM) + 1;
311 system_name = kmalloc(len, GFP_KERNEL);
316 snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);
321 static struct user_event_group *current_user_event_group(void)
326 static struct user_event_group *user_event_group_create(void)
328 struct user_event_group *group;
330 group = kzalloc(sizeof(*group), GFP_KERNEL);
335 group->system_name = user_event_group_system_name();
337 if (!group->system_name)
340 mutex_init(&group->reg_mutex);
341 hash_init(group->register_table);
346 user_event_group_destroy(group);
351 static void user_event_enabler_destroy(struct user_event_enabler *enabler,
354 list_del_rcu(&enabler->mm_enablers_link);
356 /* No longer tracking the event via the enabler */
357 user_event_put(enabler->event, locked);
362 static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
369 * Normally this is low, ensure that it cannot be taken advantage of by
370 * bad user processes to cause excessive looping.
375 mmap_read_lock(mm->mm);
377 /* Ensure MM has tasks, cannot use after exit_mm() */
378 if (refcount_read(&mm->tasks) == 0) {
383 ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
386 mmap_read_unlock(mm->mm);
391 static int user_event_enabler_write(struct user_event_mm *mm,
392 struct user_event_enabler *enabler,
393 bool fixup_fault, int *attempt);
395 static void user_event_enabler_fault_fixup(struct work_struct *work)
397 struct user_event_enabler_fault *fault = container_of(
398 work, struct user_event_enabler_fault, work);
399 struct user_event_enabler *enabler = fault->enabler;
400 struct user_event_mm *mm = fault->mm;
401 unsigned long uaddr = enabler->addr;
402 int attempt = fault->attempt;
405 ret = user_event_mm_fault_in(mm, uaddr, attempt);
407 if (ret && ret != -ENOENT) {
408 struct user_event *user = enabler->event;
410 pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
411 mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
414 /* Prevent state changes from racing */
415 mutex_lock(&event_mutex);
417 /* User asked for enabler to be removed during fault */
418 if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
419 user_event_enabler_destroy(enabler, true);
424 * If we managed to get the page, re-issue the write. We do not
425 * want to get into a possible infinite loop, which is why we only
426 * attempt again directly if the page came in. If we couldn't get
427 * the page here, then we will try again the next time the event is
430 clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
433 mmap_read_lock(mm->mm);
434 user_event_enabler_write(mm, enabler, true, &attempt);
435 mmap_read_unlock(mm->mm);
438 mutex_unlock(&event_mutex);
440 /* In all cases we no longer need the mm or fault */
441 user_event_mm_put(mm);
442 kmem_cache_free(fault_cache, fault);
445 static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
446 struct user_event_enabler *enabler,
449 struct user_event_enabler_fault *fault;
451 fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
456 INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
457 fault->mm = user_event_mm_get(mm);
458 fault->enabler = enabler;
459 fault->attempt = attempt;
461 /* Don't try to queue in again while we have a pending fault */
462 set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
464 if (!schedule_work(&fault->work)) {
465 /* Allow another attempt later */
466 clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
468 user_event_mm_put(mm);
469 kmem_cache_free(fault_cache, fault);
477 static int user_event_enabler_write(struct user_event_mm *mm,
478 struct user_event_enabler *enabler,
479 bool fixup_fault, int *attempt)
481 unsigned long uaddr = enabler->addr;
487 lockdep_assert_held(&event_mutex);
488 mmap_assert_locked(mm->mm);
492 /* Ensure MM has tasks, cannot use after exit_mm() */
493 if (refcount_read(&mm->tasks) == 0)
496 if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
497 test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
500 ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
503 if (unlikely(ret <= 0)) {
507 if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
508 pr_warn("user_events: Unable to queue fault handler\n");
513 kaddr = kmap_local_page(page);
514 ptr = kaddr + (uaddr & ~PAGE_MASK);
516 /* Update bit atomically, user tracers must be atomic as well */
517 if (enabler->event && enabler->event->status)
518 set_bit(ENABLE_BIT(enabler), ptr);
520 clear_bit(ENABLE_BIT(enabler), ptr);
523 unpin_user_pages_dirty_lock(&page, 1, true);
528 static bool user_event_enabler_exists(struct user_event_mm *mm,
529 unsigned long uaddr, unsigned char bit)
531 struct user_event_enabler *enabler;
533 list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
534 if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
541 static void user_event_enabler_update(struct user_event *user)
543 struct user_event_enabler *enabler;
544 struct user_event_mm *next;
545 struct user_event_mm *mm;
548 lockdep_assert_held(&event_mutex);
551 * We need to build a one-shot list of all the mms that have an
552 * enabler for the user_event passed in. This list is only valid
553 * while holding the event_mutex. The only reason for this is due
554 * to the global mm list being RCU protected and we use methods
555 * which can wait (mmap_read_lock and pin_user_pages_remote).
557 * NOTE: user_event_mm_get_all() increments the ref count of each
558 * mm that is added to the list to prevent removal timing windows.
559 * We must always put each mm after they are used, which may wait.
561 mm = user_event_mm_get_all(user);
565 mmap_read_lock(mm->mm);
567 list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
568 if (enabler->event == user) {
570 user_event_enabler_write(mm, enabler, true, &attempt);
574 mmap_read_unlock(mm->mm);
575 user_event_mm_put(mm);
580 static bool user_event_enabler_dup(struct user_event_enabler *orig,
581 struct user_event_mm *mm)
583 struct user_event_enabler *enabler;
585 /* Skip pending frees */
586 if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
589 enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);
594 enabler->event = user_event_get(orig->event);
595 enabler->addr = orig->addr;
597 /* Only dup part of value (ignore future flags, etc) */
598 enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
600 /* Enablers not exposed yet, RCU not required */
601 list_add(&enabler->mm_enablers_link, &mm->enablers);
606 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
608 refcount_inc(&mm->refcnt);
613 static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
615 struct user_event_mm *found = NULL;
616 struct user_event_enabler *enabler;
617 struct user_event_mm *mm;
620 * We use the mm->next field to build a one-shot list from the global
621 * RCU protected list. To build this list the event_mutex must be held.
622 * This lets us build a list without requiring allocs that could fail
623 * when user based events are most wanted for diagnostics.
625 lockdep_assert_held(&event_mutex);
628 * We do not want to block fork/exec while enablements are being
629 * updated, so we use RCU to walk the current tasks that have used
630 * user_events ABI for 1 or more events. Each enabler found in each
631 * task that matches the event being updated has a write to reflect
632 * the kernel state back into the process. Waits/faults must not occur
633 * during this. So we scan the list under RCU for all the mm that have
634 * the event within it. This is needed because mmap_read_lock() can wait.
635 * Each user mm returned has a ref inc to handle remove RCU races.
639 list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
640 list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
641 if (enabler->event == user) {
643 found = user_event_mm_get(mm);
654 static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
656 struct user_event_mm *user_mm;
658 user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
664 INIT_LIST_HEAD(&user_mm->enablers);
665 refcount_set(&user_mm->refcnt, 1);
666 refcount_set(&user_mm->tasks, 1);
669 * The lifetime of the memory descriptor can slightly outlast
670 * the task lifetime if a ref to the user_event_mm is taken
671 * between list_del_rcu() and call_rcu(). Therefore we need
672 * to take a reference to it to ensure it can live this long
673 * under this corner case. This can also occur in clones that
674 * outlast the parent.
681 static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
685 spin_lock_irqsave(&user_event_mms_lock, flags);
686 list_add_rcu(&user_mm->mms_link, &user_event_mms);
687 spin_unlock_irqrestore(&user_event_mms_lock, flags);
689 t->user_event_mm = user_mm;
692 static struct user_event_mm *current_user_event_mm(void)
694 struct user_event_mm *user_mm = current->user_event_mm;
699 user_mm = user_event_mm_alloc(current);
704 user_event_mm_attach(user_mm, current);
706 refcount_inc(&user_mm->refcnt);
711 static void user_event_mm_destroy(struct user_event_mm *mm)
713 struct user_event_enabler *enabler, *next;
715 list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
716 user_event_enabler_destroy(enabler, false);
722 static void user_event_mm_put(struct user_event_mm *mm)
724 if (mm && refcount_dec_and_test(&mm->refcnt))
725 user_event_mm_destroy(mm);
728 static void delayed_user_event_mm_put(struct work_struct *work)
730 struct user_event_mm *mm;
732 mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
733 user_event_mm_put(mm);
736 void user_event_mm_remove(struct task_struct *t)
738 struct user_event_mm *mm;
743 mm = t->user_event_mm;
744 t->user_event_mm = NULL;
746 /* Clone will increment the tasks, only remove if last clone */
747 if (!refcount_dec_and_test(&mm->tasks))
750 /* Remove the mm from the list, so it can no longer be enabled */
751 spin_lock_irqsave(&user_event_mms_lock, flags);
752 list_del_rcu(&mm->mms_link);
753 spin_unlock_irqrestore(&user_event_mms_lock, flags);
756 * We need to wait for currently occurring writes to stop within
757 * the mm. This is required since exit_mm() snaps the current rss
758 * stats and clears them. On the final mmdrop(), check_mm() will
759 * report a bug if these increment.
761 * All writes/pins are done under mmap_read lock, take the write
762 * lock to ensure in-progress faults have completed. Faults that
763 * are pending but yet to run will check the task count and skip
764 * the fault since the mm is going away.
766 mmap_write_lock(mm->mm);
767 mmap_write_unlock(mm->mm);
770 * Put for mm must be done after RCU delay to handle new refs in
771 * between the list_del_rcu() and now. This ensures any get refs
772 * during rcu_read_lock() are accounted for during list removal.
775 * ---------------------------------------------------------------
776 * user_event_mm_remove() | rcu_read_lock();
777 * list_del_rcu() | list_for_each_entry_rcu();
778 * call_rcu() | refcount_inc();
779 * . | rcu_read_unlock();
780 * schedule_work() | .
781 * user_event_mm_put() | .
783 * mmdrop() cannot be called in the softirq context of call_rcu()
784 * so we use a work queue after call_rcu() to run within.
786 INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
787 queue_rcu_work(system_wq, &mm->put_rwork);
790 void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
792 struct user_event_mm *mm = user_event_mm_alloc(t);
793 struct user_event_enabler *enabler;
800 list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
801 if (!user_event_enabler_dup(enabler, mm))
807 user_event_mm_attach(mm, t);
811 user_event_mm_destroy(mm);
814 static bool current_user_event_enabler_exists(unsigned long uaddr,
817 struct user_event_mm *user_mm = current_user_event_mm();
823 exists = user_event_enabler_exists(user_mm, uaddr, bit);
825 user_event_mm_put(user_mm);
830 static struct user_event_enabler
831 *user_event_enabler_create(struct user_reg *reg, struct user_event *user,
834 struct user_event_enabler *enabler;
835 struct user_event_mm *user_mm;
836 unsigned long uaddr = (unsigned long)reg->enable_addr;
839 user_mm = current_user_event_mm();
844 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);
849 enabler->event = user;
850 enabler->addr = uaddr;
851 enabler->values = reg->enable_bit;
853 /* Prevents state changes from racing with new enablers */
854 mutex_lock(&event_mutex);
856 /* Attempt to reflect the current state within the process */
857 mmap_read_lock(user_mm->mm);
858 *write_result = user_event_enabler_write(user_mm, enabler, false,
860 mmap_read_unlock(user_mm->mm);
863 * If the write works, then we will track the enabler. A ref to the
864 * underlying user_event is held by the enabler to prevent it going
865 * away while the enabler is still in use by a process. The ref is
866 * removed when the enabler is destroyed. This means an event cannot
867 * be forcefully deleted from the system until all tasks using it
868 * exit or run exec(), which includes forks and clones.
870 if (!*write_result) {
871 user_event_get(user);
872 list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
875 mutex_unlock(&event_mutex);
878 /* Attempt to fault-in and retry if it worked */
879 if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
886 user_event_mm_put(user_mm);
891 static __always_inline __must_check
892 bool user_event_last_ref(struct user_event *user)
896 if (user->reg_flags & USER_EVENT_REG_PERSIST)
899 return refcount_read(&user->refcnt) == last;
902 static __always_inline __must_check
903 size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
909 ret = copy_from_iter_nocache(addr, bytes, i);
916 static struct list_head *user_event_get_fields(struct trace_event_call *call)
918 struct user_event *user = (struct user_event *)call->data;
920 return &user->fields;
924 * Parses a register command for user_events
925 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
927 * Example event named 'test' with a 20 char 'msg' field with an unsigned int
929 * test char[20] msg;unsigned int id
931 * NOTE: Offsets are from the user data perspective, they are not from the
932 * trace_entry/buffer perspective. We automatically add the common properties
933 * sizes to the offset for the user.
935 * Upon success user_event has its ref count increased by 1.
937 static int user_event_parse_cmd(struct user_event_group *group,
938 char *raw_command, struct user_event **newuser,
941 char *name = raw_command;
942 char *args = strpbrk(name, " ");
948 flags = strpbrk(name, ":");
953 return user_event_parse(group, name, args, flags, newuser, reg_flags);
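/*
 * Illustrative example (not part of the original file): for the raw command
 * "test char[20] msg;unsigned int id" the strpbrk() calls above split the
 * buffer so that name becomes "test", args becomes
 * "char[20] msg;unsigned int id" and flags stays NULL (no ':' present).
 */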
956 static int user_field_array_size(const char *type)
958 const char *start = strchr(type, '[');
966 if (strscpy(val, start + 1, sizeof(val)) <= 0)
969 bracket = strchr(val, ']');
976 if (kstrtouint(val, 0, &size))
979 if (size > MAX_FIELD_ARRAY_SIZE)
985 static int user_field_size(const char *type)
987 /* long is not allowed from a user, since it's ambiguous in size */
988 if (strcmp(type, "s64") == 0)
990 if (strcmp(type, "u64") == 0)
992 if (strcmp(type, "s32") == 0)
994 if (strcmp(type, "u32") == 0)
996 if (strcmp(type, "int") == 0)
998 if (strcmp(type, "unsigned int") == 0)
999 return sizeof(unsigned int);
1000 if (strcmp(type, "s16") == 0)
1002 if (strcmp(type, "u16") == 0)
1004 if (strcmp(type, "short") == 0)
1005 return sizeof(short);
1006 if (strcmp(type, "unsigned short") == 0)
1007 return sizeof(unsigned short);
1008 if (strcmp(type, "s8") == 0)
1010 if (strcmp(type, "u8") == 0)
1012 if (strcmp(type, "char") == 0)
1013 return sizeof(char);
1014 if (strcmp(type, "unsigned char") == 0)
1015 return sizeof(unsigned char);
1016 if (str_has_prefix(type, "char["))
1017 return user_field_array_size(type);
1018 if (str_has_prefix(type, "unsigned char["))
1019 return user_field_array_size(type);
1020 if (str_has_prefix(type, "__data_loc "))
1022 if (str_has_prefix(type, "__rel_loc "))
1025 /* Unknown basic type, error */
1029 static void user_event_destroy_validators(struct user_event *user)
1031 struct user_event_validator *validator, *next;
1032 struct list_head *head = &user->validators;
1034 list_for_each_entry_safe(validator, next, head, user_event_link) {
1035 list_del(&validator->user_event_link);
1040 static void user_event_destroy_fields(struct user_event *user)
1042 struct ftrace_event_field *field, *next;
1043 struct list_head *head = &user->fields;
1045 list_for_each_entry_safe(field, next, head, link) {
1046 list_del(&field->link);
1051 static int user_event_add_field(struct user_event *user, const char *type,
1052 const char *name, int offset, int size,
1053 int is_signed, int filter_type)
1055 struct user_event_validator *validator;
1056 struct ftrace_event_field *field;
1057 int validator_flags = 0;
1059 field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);
1064 if (str_has_prefix(type, "__data_loc "))
1067 if (str_has_prefix(type, "__rel_loc ")) {
1068 validator_flags |= VALIDATOR_REL;
1075 if (strstr(type, "char") != NULL)
1076 validator_flags |= VALIDATOR_ENSURE_NULL;
1078 validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);
1085 validator->flags = validator_flags;
1086 validator->offset = offset;
1088 /* Want sequential access when validating */
1089 list_add_tail(&validator->user_event_link, &user->validators);
1094 field->offset = offset;
1096 field->is_signed = is_signed;
1097 field->filter_type = filter_type;
1099 if (filter_type == FILTER_OTHER)
1100 field->filter_type = filter_assign_type(type);
1102 list_add(&field->link, &user->fields);
1105 * Minimum size required from user writes; this does not include
1106 * the size of trace_entry (common fields).
1108 user->min_size = (offset + size) - sizeof(struct trace_entry);
1114 * Parses the values of a field within the description
1115 * Format: type name [size]
1117 static int user_event_parse_field(char *field, struct user_event *user,
1120 char *part, *type, *name;
1121 u32 depth = 0, saved_offset = *offset;
1122 int len, size = -EINVAL;
1123 bool is_struct = false;
1125 field = skip_spaces(field);
1130 /* Handle types that have a space within */
1131 len = str_has_prefix(field, "unsigned ");
1135 len = str_has_prefix(field, "struct ");
1141 len = str_has_prefix(field, "__data_loc unsigned ");
1145 len = str_has_prefix(field, "__data_loc ");
1149 len = str_has_prefix(field, "__rel_loc unsigned ");
1153 len = str_has_prefix(field, "__rel_loc ");
1160 field = strpbrk(field + len, " ");
1170 while ((part = strsep(&field, " ")) != NULL) {
1172 case FIELD_DEPTH_TYPE:
1175 case FIELD_DEPTH_NAME:
1178 case FIELD_DEPTH_SIZE:
1182 if (kstrtou32(part, 10, &size))
1190 if (depth < FIELD_DEPTH_SIZE || !name)
1193 if (depth == FIELD_DEPTH_SIZE)
1194 size = user_field_size(type);
1202 *offset = saved_offset + size;
1204 return user_event_add_field(user, type, name, saved_offset, size,
1205 type[0] != 'u', FILTER_OTHER);
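/*
 * Illustrative examples of the "type name [size]" handling above (not part
 * of the original file, hypothetical names): "u32 count" resolves its size
 * via user_field_size() and is treated as unsigned (type[0] == 'u'), while
 * something like "struct mystruct myname 32" must carry the trailing
 * explicit size, since the kernel cannot infer the size of a user-defined
 * struct.
 */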
1208 static int user_event_parse_fields(struct user_event *user, char *args)
1211 u32 offset = sizeof(struct trace_entry);
1217 while ((field = strsep(&args, ";")) != NULL) {
1218 ret = user_event_parse_field(field, user, &offset);
1227 static struct trace_event_fields user_event_fields_array[1];
1229 static const char *user_field_format(const char *type)
1231 if (strcmp(type, "s64") == 0)
1233 if (strcmp(type, "u64") == 0)
1235 if (strcmp(type, "s32") == 0)
1237 if (strcmp(type, "u32") == 0)
1239 if (strcmp(type, "int") == 0)
1241 if (strcmp(type, "unsigned int") == 0)
1243 if (strcmp(type, "s16") == 0)
1245 if (strcmp(type, "u16") == 0)
1247 if (strcmp(type, "short") == 0)
1249 if (strcmp(type, "unsigned short") == 0)
1251 if (strcmp(type, "s8") == 0)
1253 if (strcmp(type, "u8") == 0)
1255 if (strcmp(type, "char") == 0)
1257 if (strcmp(type, "unsigned char") == 0)
1259 if (strstr(type, "char[") != NULL)
1262 /* Unknown, likely a struct; allowed, treat as 64-bit */
1266 static bool user_field_is_dyn_string(const char *type, const char **str_func)
1268 if (str_has_prefix(type, "__data_loc ")) {
1269 *str_func = "__get_str";
1273 if (str_has_prefix(type, "__rel_loc ")) {
1274 *str_func = "__get_rel_str";
1280 return strstr(type, "char") != NULL;
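/*
 * Illustrative examples (not part of the original file): a field declared as
 * "__data_loc char[] msg" is treated as a dynamic string and printed through
 * __get_str(msg), while "__rel_loc char[] msg" uses __get_rel_str(msg).
 */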
1283 #define LEN_OR_ZERO (len ? len - pos : 0)
1284 static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
1285 char *buf, int len, bool *colon)
1287 int pos = 0, i = *iout;
1291 for (; i < argc; ++i) {
1293 pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1295 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);
1297 if (strchr(argv[i], ';')) {
1304 /* Actual set, advance i */
1311 static int user_field_set_string(struct ftrace_event_field *field,
1312 char *buf, int len, bool colon)
1316 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
1317 pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1318 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
1320 if (str_has_prefix(field->type, "struct "))
1321 pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);
1324 pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
1329 static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
1331 struct ftrace_event_field *field;
1332 struct list_head *head = &user->fields;
1333 int pos = 0, depth = 0;
1334 const char *str_func;
1336 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1338 list_for_each_entry_reverse(field, head, link) {
1340 pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1342 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
1343 field->name, user_field_format(field->type));
1348 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1350 list_for_each_entry_reverse(field, head, link) {
1351 if (user_field_is_dyn_string(field->type, &str_func))
1352 pos += snprintf(buf + pos, LEN_OR_ZERO,
1353 ", %s(%s)", str_func, field->name);
1355 pos += snprintf(buf + pos, LEN_OR_ZERO,
1356 ", REC->%s", field->name);
1363 static int user_event_create_print_fmt(struct user_event *user)
1368 len = user_event_set_print_fmt(user, NULL, 0);
1370 print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);
1375 user_event_set_print_fmt(user, print_fmt, len);
1377 user->call.print_fmt = print_fmt;
1382 static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
1384 struct trace_event *event)
1386 return print_event_fields(iter, event);
1389 static struct trace_event_functions user_event_funcs = {
1390 .trace = user_event_print_trace,
1393 static int user_event_set_call_visible(struct user_event *user, bool visible)
1396 const struct cred *old_cred;
1399 cred = prepare_creds();
1405 * While by default tracefs is locked down, systems can be configured
1406 * to allow user_event files to be less locked down. The extreme case
1407 * being "other" has read/write access to user_events_data/status.
1409 * When not locked down, processes may not have permissions to
1410 * add/remove calls themselves to tracefs. We need to temporarily
1411 * switch to root file permission to allow for this scenario.
1413 cred->fsuid = GLOBAL_ROOT_UID;
1415 old_cred = override_creds(cred);
1418 ret = trace_add_event_call(&user->call);
1420 ret = trace_remove_event_call(&user->call);
1422 revert_creds(old_cred);
1428 static int destroy_user_event(struct user_event *user)
1432 lockdep_assert_held(&event_mutex);
1434 /* Must destroy fields before call removal */
1435 user_event_destroy_fields(user);
1437 ret = user_event_set_call_visible(user, false);
1442 dyn_event_remove(&user->devent);
1443 hash_del(&user->node);
1445 user_event_destroy_validators(user);
1446 kfree(user->call.print_fmt);
1447 kfree(EVENT_NAME(user));
1450 if (current_user_events > 0)
1451 current_user_events--;
1453 pr_alert("BUG: Bad current_user_events\n");
1458 static struct user_event *find_user_event(struct user_event_group *group,
1459 char *name, u32 *outkey)
1461 struct user_event *user;
1462 u32 key = user_event_key(name);
1466 hash_for_each_possible(group->register_table, user, node, key)
1467 if (!strcmp(EVENT_NAME(user), name))
1468 return user_event_get(user);
1473 static int user_event_validate(struct user_event *user, void *data, int len)
1475 struct list_head *head = &user->validators;
1476 struct user_event_validator *validator;
1477 void *pos, *end = data + len;
1478 u32 loc, offset, size;
1480 list_for_each_entry(validator, head, user_event_link) {
1481 pos = data + validator->offset;
1483 /* Already done min_size check, no bounds check here */
1485 offset = loc & 0xffff;
1488 if (likely(validator->flags & VALIDATOR_REL))
1489 pos += offset + sizeof(loc);
1491 pos = data + offset;
1495 if (unlikely(pos > end))
1498 if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
1499 if (unlikely(*(char *)(pos - 1) != '\0'))
1507 * Writes the user supplied payload out to a trace file.
1509 static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
1510 void *tpdata, bool *faulted)
1512 struct trace_event_file *file;
1513 struct trace_entry *entry;
1514 struct trace_event_buffer event_buffer;
1515 size_t size = sizeof(*entry) + i->count;
1517 file = (struct trace_event_file *)tpdata;
1520 !(file->flags & EVENT_FILE_FL_ENABLED) ||
1521 trace_trigger_soft_disabled(file))
1524 /* Allocates and fills trace_entry, + 1 of this is data payload */
1525 entry = trace_event_buffer_reserve(&event_buffer, file, size);
1527 if (unlikely(!entry))
1530 if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i)))
1533 if (!list_empty(&user->validators) &&
1534 unlikely(user_event_validate(user, entry, size)))
1537 trace_event_buffer_commit(&event_buffer);
1542 __trace_event_discard_commit(event_buffer.buffer,
1543 event_buffer.event);
1546 #ifdef CONFIG_PERF_EVENTS
1548 * Writes the user supplied payload out to the perf ring buffer.
1550 static void user_event_perf(struct user_event *user, struct iov_iter *i,
1551 void *tpdata, bool *faulted)
1553 struct hlist_head *perf_head;
1555 perf_head = this_cpu_ptr(user->call.perf_events);
1557 if (perf_head && !hlist_empty(perf_head)) {
1558 struct trace_entry *perf_entry;
1559 struct pt_regs *regs;
1560 size_t size = sizeof(*perf_entry) + i->count;
1563 perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
1566 if (unlikely(!perf_entry))
1569 perf_fetch_caller_regs(regs);
1571 if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i)))
1574 if (!list_empty(&user->validators) &&
1575 unlikely(user_event_validate(user, perf_entry, size)))
1578 perf_trace_buf_submit(perf_entry, size, context,
1579 user->call.event.type, 1, regs,
1585 perf_swevent_put_recursion_context(context);
1591 * Update the enabled bit among all user processes.
1593 static void update_enable_bit_for(struct user_event *user)
1595 struct tracepoint *tp = &user->tracepoint;
1598 if (atomic_read(&tp->key.enabled) > 0) {
1599 struct tracepoint_func *probe_func_ptr;
1600 user_event_func_t probe_func;
1602 rcu_read_lock_sched();
1604 probe_func_ptr = rcu_dereference_sched(tp->funcs);
1606 if (probe_func_ptr) {
1608 probe_func = probe_func_ptr->func;
1610 if (probe_func == user_event_ftrace)
1611 status |= EVENT_STATUS_FTRACE;
1612 #ifdef CONFIG_PERF_EVENTS
1613 else if (probe_func == user_event_perf)
1614 status |= EVENT_STATUS_PERF;
1617 status |= EVENT_STATUS_OTHER;
1618 } while ((++probe_func_ptr)->func);
1621 rcu_read_unlock_sched();
1624 user->status = status;
1626 user_event_enabler_update(user);
1630 * Register callback for our events from tracing sub-systems.
1632 static int user_event_reg(struct trace_event_call *call,
1633 enum trace_reg type,
1636 struct user_event *user = (struct user_event *)call->data;
1643 case TRACE_REG_REGISTER:
1644 ret = tracepoint_probe_register(call->tp,
1651 case TRACE_REG_UNREGISTER:
1652 tracepoint_probe_unregister(call->tp,
1657 #ifdef CONFIG_PERF_EVENTS
1658 case TRACE_REG_PERF_REGISTER:
1659 ret = tracepoint_probe_register(call->tp,
1660 call->class->perf_probe,
1666 case TRACE_REG_PERF_UNREGISTER:
1667 tracepoint_probe_unregister(call->tp,
1668 call->class->perf_probe,
1672 case TRACE_REG_PERF_OPEN:
1673 case TRACE_REG_PERF_CLOSE:
1674 case TRACE_REG_PERF_ADD:
1675 case TRACE_REG_PERF_DEL:
1682 user_event_get(user);
1683 update_enable_bit_for(user);
1686 update_enable_bit_for(user);
1687 user_event_put(user, true);
1691 static int user_event_create(const char *raw_command)
1693 struct user_event_group *group;
1694 struct user_event *user;
1698 if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
1701 raw_command += USER_EVENTS_PREFIX_LEN;
1702 raw_command = skip_spaces(raw_command);
1704 name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
1709 group = current_user_event_group();
1716 mutex_lock(&group->reg_mutex);
1718 /* Dyn events persist; otherwise they would be cleaned up immediately */
1719 ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);
1722 user_event_put(user, false);
1724 mutex_unlock(&group->reg_mutex);
1732 static int user_event_show(struct seq_file *m, struct dyn_event *ev)
1734 struct user_event *user = container_of(ev, struct user_event, devent);
1735 struct ftrace_event_field *field;
1736 struct list_head *head;
1739 seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
1741 head = trace_get_fields(&user->call);
1743 list_for_each_entry_reverse(field, head, link) {
1749 seq_printf(m, "%s %s", field->type, field->name);
1751 if (str_has_prefix(field->type, "struct "))
1752 seq_printf(m, " %d", field->size);
1762 static bool user_event_is_busy(struct dyn_event *ev)
1764 struct user_event *user = container_of(ev, struct user_event, devent);
1766 return !user_event_last_ref(user);
1769 static int user_event_free(struct dyn_event *ev)
1771 struct user_event *user = container_of(ev, struct user_event, devent);
1773 if (!user_event_last_ref(user))
1776 return destroy_user_event(user);
1779 static bool user_field_match(struct ftrace_event_field *field, int argc,
1780 const char **argv, int *iout)
1782 char *field_name = NULL, *dyn_field_name = NULL;
1783 bool colon = false, match = false;
1789 dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1792 len = user_field_set_string(field, field_name, 0, colon);
1797 dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
1798 field_name = kmalloc(len, GFP_KERNEL);
1800 if (!dyn_field_name || !field_name)
1803 user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1806 user_field_set_string(field, field_name, len, colon);
1808 match = strcmp(dyn_field_name, field_name) == 0;
1810 kfree(dyn_field_name);
1816 static bool user_fields_match(struct user_event *user, int argc,
1819 struct ftrace_event_field *field;
1820 struct list_head *head = &user->fields;
1823 list_for_each_entry_reverse(field, head, link) {
1824 if (!user_field_match(field, argc, argv, &i))
1834 static bool user_event_match(const char *system, const char *event,
1835 int argc, const char **argv, struct dyn_event *ev)
1837 struct user_event *user = container_of(ev, struct user_event, devent);
1840 match = strcmp(EVENT_NAME(user), event) == 0 &&
1841 (!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
1843 if (match && argc > 0)
1844 match = user_fields_match(user, argc, argv);
1845 else if (match && argc == 0)
1846 match = list_empty(&user->fields);
1851 static struct dyn_event_operations user_event_dops = {
1852 .create = user_event_create,
1853 .show = user_event_show,
1854 .is_busy = user_event_is_busy,
1855 .free = user_event_free,
1856 .match = user_event_match,
1859 static int user_event_trace_register(struct user_event *user)
1863 ret = register_trace_event(&user->call.event);
1868 ret = user_event_set_call_visible(user, true);
1871 unregister_trace_event(&user->call.event);
1877 * Parses the event name, arguments and flags then registers if successful.
1878 * The name buffer lifetime is owned by this method for success cases only.
1879 * Upon success the returned user_event has its ref count increased by 1.
1881 static int user_event_parse(struct user_event_group *group, char *name,
1882 char *args, char *flags,
1883 struct user_event **newuser, int reg_flags)
1887 struct user_event *user;
1891 /* User register flags are not ready yet */
1892 if (reg_flags != 0 || flags != NULL)
1895 /* Prevent dyn_event from racing */
1896 mutex_lock(&event_mutex);
1897 user = find_user_event(group, name, &key);
1898 mutex_unlock(&event_mutex);
1902 argv = argv_split(GFP_KERNEL, args, &argc);
1908 ret = user_fields_match(user, argc, (const char **)argv);
1912 ret = list_empty(&user->fields);
1917 * Name is allocated by caller, free it since it already exists.
1918 * Caller only worries about failure cases for freeing.
1928 user_event_put(user, false);
1932 user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
1937 INIT_LIST_HEAD(&user->class.fields);
1938 INIT_LIST_HEAD(&user->fields);
1939 INIT_LIST_HEAD(&user->validators);
1941 user->group = group;
1942 user->tracepoint.name = name;
1944 ret = user_event_parse_fields(user, args);
1949 ret = user_event_create_print_fmt(user);
1954 user->call.data = user;
1955 user->call.class = &user->class;
1956 user->call.name = name;
1957 user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
1958 user->call.tp = &user->tracepoint;
1959 user->call.event.funcs = &user_event_funcs;
1960 user->class.system = group->system_name;
1962 user->class.fields_array = user_event_fields_array;
1963 user->class.get_fields = user_event_get_fields;
1964 user->class.reg = user_event_reg;
1965 user->class.probe = user_event_ftrace;
1966 #ifdef CONFIG_PERF_EVENTS
1967 user->class.perf_probe = user_event_perf;
1970 mutex_lock(&event_mutex);
1972 if (current_user_events >= max_user_events) {
1977 ret = user_event_trace_register(user);
1982 user->reg_flags = reg_flags;
1984 if (user->reg_flags & USER_EVENT_REG_PERSIST) {
1985 /* Ensure we track self ref and caller ref (2) */
1986 refcount_set(&user->refcnt, 2);
1988 /* Ensure we track only caller ref (1) */
1989 refcount_set(&user->refcnt, 1);
1992 dyn_event_init(&user->devent, &user_event_dops);
1993 dyn_event_add(&user->devent, &user->call);
1994 hash_add(group->register_table, &user->node, key);
1995 current_user_events++;
1997 mutex_unlock(&event_mutex);
2002 mutex_unlock(&event_mutex);
2004 user_event_destroy_fields(user);
2005 user_event_destroy_validators(user);
2006 kfree(user->call.print_fmt);
2012 * Deletes a previously created event if it is no longer being used.
2014 static int delete_user_event(struct user_event_group *group, char *name)
2017 struct user_event *user = find_user_event(group, name, &key);
2022 user_event_put(user, true);
2024 if (!user_event_last_ref(user))
2027 return destroy_user_event(user);
2031 * Validates the user payload and writes via iterator.
2033 static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
2035 struct user_event_file_info *info = file->private_data;
2036 struct user_event_refs *refs;
2037 struct user_event *user = NULL;
2038 struct tracepoint *tp;
2039 ssize_t ret = i->count;
2042 if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
2048 rcu_read_lock_sched();
2050 refs = rcu_dereference_sched(info->refs);
2053 * The refs->events array is protected by RCU, and new items may be
2054 * added. But the user retrieved from indexing into the events array
2055 * shall be immutable while the file is opened.
2057 if (likely(refs && idx < refs->count))
2058 user = refs->events[idx];
2060 rcu_read_unlock_sched();
2062 if (unlikely(user == NULL))
2065 if (unlikely(i->count < user->min_size))
2068 tp = &user->tracepoint;
2071 * It's possible key.enabled disables after this check, however
2072 * we don't mind if a few events are included in this condition.
2074 if (likely(atomic_read(&tp->key.enabled) > 0)) {
2075 struct tracepoint_func *probe_func_ptr;
2076 user_event_func_t probe_func;
2077 struct iov_iter copy;
2081 if (unlikely(fault_in_iov_iter_readable(i, i->count)))
2086 rcu_read_lock_sched();
2088 probe_func_ptr = rcu_dereference_sched(tp->funcs);
2090 if (probe_func_ptr) {
2093 probe_func = probe_func_ptr->func;
2094 tpdata = probe_func_ptr->data;
2095 probe_func(user, &copy, tpdata, &faulted);
2096 } while ((++probe_func_ptr)->func);
2099 rcu_read_unlock_sched();
2101 if (unlikely(faulted))
2109 static int user_events_open(struct inode *node, struct file *file)
2111 struct user_event_group *group;
2112 struct user_event_file_info *info;
2114 group = current_user_event_group();
2119 info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
2124 info->group = group;
2126 file->private_data = info;
2131 static ssize_t user_events_write(struct file *file, const char __user *ubuf,
2132 size_t count, loff_t *ppos)
2137 if (unlikely(*ppos != 0))
2140 if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
2144 return user_events_write_core(file, &i);
2147 static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
2149 return user_events_write_core(kp->ki_filp, i);
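/*
 * Illustrative user-space sketch of the write path handled above (not part
 * of the original file; error handling omitted). "data_fd" is assumed to be
 * an open descriptor on user_events_data and "write_index" the index that
 * DIAG_IOCSREG returned for an event registered as "test u32 count":
 *
 *	struct iovec io[2];
 *	__u32 count = 1;
 *
 *	io[0].iov_base = &write_index;
 *	io[0].iov_len = sizeof(write_index);
 *	io[1].iov_base = &count;
 *	io[1].iov_len = sizeof(count);
 *
 *	writev(data_fd, io, 2);
 */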
2152 static int user_events_ref_add(struct user_event_file_info *info,
2153 struct user_event *user)
2155 struct user_event_group *group = info->group;
2156 struct user_event_refs *refs, *new_refs;
2157 int i, size, count = 0;
2159 refs = rcu_dereference_protected(info->refs,
2160 lockdep_is_held(&group->reg_mutex));
2163 count = refs->count;
2165 for (i = 0; i < count; ++i)
2166 if (refs->events[i] == user)
2170 size = struct_size(refs, events, count + 1);
2172 new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);
2177 new_refs->count = count + 1;
2179 for (i = 0; i < count; ++i)
2180 new_refs->events[i] = refs->events[i];
2182 new_refs->events[i] = user_event_get(user);
2184 rcu_assign_pointer(info->refs, new_refs);
2187 kfree_rcu(refs, rcu);
2192 static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
2197 ret = get_user(size, &ureg->size);
2202 if (size > PAGE_SIZE)
2205 if (size < offsetofend(struct user_reg, write_index))
2208 ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2213 /* Ensure only valid flags */
2214 if (kreg->flags & ~(USER_EVENT_REG_MAX-1))
2217 /* Ensure supported size */
2218 switch (kreg->enable_size) {
2222 #if BITS_PER_LONG >= 64
2231 /* Ensure natural alignment */
2232 if (kreg->enable_addr % kreg->enable_size)
2235 /* Ensure bit range for size */
2236 if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
2239 /* Ensure accessible */
2240 if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
2250 * Registers a user_event on behalf of a user process.
2252 static long user_events_ioctl_reg(struct user_event_file_info *info,
2255 struct user_reg __user *ureg = (struct user_reg __user *)uarg;
2256 struct user_reg reg;
2257 struct user_event *user;
2258 struct user_event_enabler *enabler;
2263 ret = user_reg_get(ureg, &reg);
2269 * Prevent users from using the same address and bit multiple times
2270 * within the same mm address space. This can cause unexpected behavior
2271 * for user processes that is far easier to debug if this is explicitly
2272 * an error upon registering.
2274 if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
2278 name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
2282 ret = PTR_ERR(name);
2286 ret = user_event_parse_cmd(info->group, name, &user, reg.flags);
2293 ret = user_events_ref_add(info, user);
2295 /* No longer need parse ref, ref_add either worked or not */
2296 user_event_put(user, false);
2298 /* Positive number is index and valid */
2303 * user_events_ref_add succeeded:
2304 * At this point we have a user_event; its lifetime is bound by the
2305 * reference count, not this file. If anything fails, the user_event
2306 * still has a reference until the file is released. During release
2307 * any remaining references (from user_events_ref_add) are decremented.
2309 * Attempt to create an enabler, which too has a lifetime tied in the
2310 * same way for the event. Once the task that caused the enabler to be
2311 * created exits or issues exec() then the enablers it has created
2312 * will be destroyed and the ref to the event will be decremented.
2314 enabler = user_event_enabler_create(&reg, user, &write_result);
2319 /* Write failed/faulted, give error back to caller */
2321 return write_result;
2323 put_user((u32)ret, &ureg->write_index);
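/*
 * Illustrative user-space sketch of the registration flow above (not part
 * of the original file; error handling omitted, default tracefs mount point
 * assumed). "enabled" is the word the kernel sets/clears bit 31 in whenever
 * the event is enabled or disabled; enable_bit must fit within
 * enable_size * 8 bits and enable_addr must be naturally aligned, as checked
 * in user_reg_get(). On success the index to prefix writes with comes back
 * in reg.write_index via the put_user() above:
 *
 *	int enabled = 0;
 *	struct user_reg reg = {0};
 *	int data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *
 *	reg.size = sizeof(reg);
 *	reg.enable_bit = 31;
 *	reg.enable_size = sizeof(enabled);
 *	reg.enable_addr = (__u64)(uintptr_t)&enabled;
 *	reg.name_args = (__u64)(uintptr_t)"test u32 count";
 *
 *	ioctl(data_fd, DIAG_IOCSREG, &reg);
 */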
2329 * Deletes a user_event on behalf of a user process.
2331 static long user_events_ioctl_del(struct user_event_file_info *info,
2334 void __user *ubuf = (void __user *)uarg;
2338 name = strndup_user(ubuf, MAX_EVENT_DESC);
2341 return PTR_ERR(name);
2343 /* event_mutex prevents dyn_event from racing */
2344 mutex_lock(&event_mutex);
2345 ret = delete_user_event(info->group, name);
2346 mutex_unlock(&event_mutex);
2353 static long user_unreg_get(struct user_unreg __user *ureg,
2354 struct user_unreg *kreg)
2359 ret = get_user(size, &ureg->size);
2364 if (size > PAGE_SIZE)
2367 if (size < offsetofend(struct user_unreg, disable_addr))
2370 ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2372 /* Ensure no reserved values, since we don't support any yet */
2373 if (kreg->__reserved || kreg->__reserved2)
2379 static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
2380 unsigned long uaddr, unsigned char bit)
2382 struct user_event_enabler enabler;
2386 memset(&enabler, 0, sizeof(enabler));
2387 enabler.addr = uaddr;
2388 enabler.values = bit;
2390 /* Prevents state changes from racing with new enablers */
2391 mutex_lock(&event_mutex);
2393 /* Force the bit to be cleared, since no event is attached */
2394 mmap_read_lock(user_mm->mm);
2395 result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
2396 mmap_read_unlock(user_mm->mm);
2398 mutex_unlock(&event_mutex);
2401 /* Attempt to fault-in and retry if it worked */
2402 if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
2410 * Unregisters an enablement address/bit within a task/user mm.
2412 static long user_events_ioctl_unreg(unsigned long uarg)
2414 struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
2415 struct user_event_mm *mm = current->user_event_mm;
2416 struct user_event_enabler *enabler, *next;
2417 struct user_unreg reg;
2420 ret = user_unreg_get(ureg, &reg);
2431 * Flags freeing and faulting are used to indicate if the enabler is in
2432 * use at all. When faulting is set, a page-fault is occurring asynchronously.
2433 * During async fault if freeing is set, the enabler will be destroyed.
2434 * If no async fault is happening, we can destroy it now since we hold
2435 * the event_mutex during these checks.
2437 mutex_lock(&event_mutex);
2439 list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
2440 if (enabler->addr == reg.disable_addr &&
2441 ENABLE_BIT(enabler) == reg.disable_bit) {
2442 set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
2444 if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
2445 user_event_enabler_destroy(enabler, true);
2447 /* Removed at least one */
2452 mutex_unlock(&event_mutex);
2454 /* Ensure bit is now cleared for user, regardless of event status */
2456 ret = user_event_mm_clear_bit(mm, reg.disable_addr,
2463 * Handles the ioctl from user mode to register or alter operations.
2465 static long user_events_ioctl(struct file *file, unsigned int cmd,
2468 struct user_event_file_info *info = file->private_data;
2469 struct user_event_group *group = info->group;
2474 mutex_lock(&group->reg_mutex);
2475 ret = user_events_ioctl_reg(info, uarg);
2476 mutex_unlock(&group->reg_mutex);
2480 mutex_lock(&group->reg_mutex);
2481 ret = user_events_ioctl_del(info, uarg);
2482 mutex_unlock(&group->reg_mutex);
2485 case DIAG_IOCSUNREG:
2486 mutex_lock(&group->reg_mutex);
2487 ret = user_events_ioctl_unreg(uarg);
2488 mutex_unlock(&group->reg_mutex);
2496 * Handles the final close of the file from user mode.
2498 static int user_events_release(struct inode *node, struct file *file)
2500 struct user_event_file_info *info = file->private_data;
2501 struct user_event_group *group;
2502 struct user_event_refs *refs;
2508 group = info->group;
2511 * Ensure refs cannot change under any situation by taking the
2512 * register mutex during the final freeing of the references.
2514 mutex_lock(&group->reg_mutex);
2522 * The lifetime of refs has reached an end, it's tied to this file.
2523 * The underlying user_events are ref counted, and cannot be freed.
2524 * After this decrement, the user_events may be freed elsewhere.
2526 for (i = 0; i < refs->count; ++i)
2527 user_event_put(refs->events[i], false);
2530 file->private_data = NULL;
2532 mutex_unlock(&group->reg_mutex);
2540 static const struct file_operations user_data_fops = {
2541 .open = user_events_open,
2542 .write = user_events_write,
2543 .write_iter = user_events_write_iter,
2544 .unlocked_ioctl = user_events_ioctl,
2545 .release = user_events_release,
2548 static void *user_seq_start(struct seq_file *m, loff_t *pos)
2556 static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
2562 static void user_seq_stop(struct seq_file *m, void *p)
2566 static int user_seq_show(struct seq_file *m, void *p)
2568 struct user_event_group *group = m->private;
2569 struct user_event *user;
2571 int i, active = 0, busy = 0;
2576 mutex_lock(&group->reg_mutex);
2578 hash_for_each(group->register_table, i, user, node) {
2579 status = user->status;
2581 seq_printf(m, "%s", EVENT_NAME(user));
2587 seq_puts(m, " Used by");
2588 if (status & EVENT_STATUS_FTRACE)
2589 seq_puts(m, " ftrace");
2590 if (status & EVENT_STATUS_PERF)
2591 seq_puts(m, " perf");
2592 if (status & EVENT_STATUS_OTHER)
2593 seq_puts(m, " other");
2601 mutex_unlock(&group->reg_mutex);
2604 seq_printf(m, "Active: %d\n", active);
2605 seq_printf(m, "Busy: %d\n", busy);
2610 static const struct seq_operations user_seq_ops = {
2611 .start = user_seq_start,
2612 .next = user_seq_next,
2613 .stop = user_seq_stop,
2614 .show = user_seq_show,
2617 static int user_status_open(struct inode *node, struct file *file)
2619 struct user_event_group *group;
2622 group = current_user_event_group();
2627 ret = seq_open(file, &user_seq_ops);
2630 /* Chain group to seq_file */
2631 struct seq_file *m = file->private_data;
2639 static const struct file_operations user_status_fops = {
2640 .open = user_status_open,
2642 .llseek = seq_lseek,
2643 .release = seq_release,
2647 * Creates a set of tracefs files to allow user mode interactions.
2649 static int create_user_tracefs(void)
2651 struct dentry *edata, *emmap;
2653 edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
2654 NULL, NULL, &user_data_fops);
2657 pr_warn("Could not create tracefs 'user_events_data' entry\n");
2661 emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
2662 NULL, NULL, &user_status_fops);
2665 tracefs_remove(edata);
2666 pr_warn("Could not create tracefs 'user_events_mmap' entry\n");
2675 static int set_max_user_events_sysctl(struct ctl_table *table, int write,
2676 void *buffer, size_t *lenp, loff_t *ppos)
2680 mutex_lock(&event_mutex);
2682 ret = proc_douintvec(table, write, buffer, lenp, ppos);
2684 mutex_unlock(&event_mutex);
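/*
 * The limit above is exposed as /proc/sys/kernel/user_events_max through the
 * table below (registered under "kernel" in trace_events_user_init()). For
 * example, an administrator could raise it with (illustrative):
 *
 *	echo 65536 > /proc/sys/kernel/user_events_max
 */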
2689 static struct ctl_table user_event_sysctls[] = {
2691 .procname = "user_events_max",
2692 .data = &max_user_events,
2693 .maxlen = sizeof(unsigned int),
2695 .proc_handler = set_max_user_events_sysctl,
2700 static int __init trace_events_user_init(void)
2704 fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);
2709 init_group = user_event_group_create();
2712 kmem_cache_destroy(fault_cache);
2716 ret = create_user_tracefs();
2719 pr_warn("user_events could not register with tracefs\n");
2720 user_event_group_destroy(init_group);
2721 kmem_cache_destroy(fault_cache);
2726 if (dyn_event_register(&user_event_dops))
2727 pr_warn("user_events could not register with dyn_events\n");
2729 register_sysctl_init("kernel", user_event_sysctls);
2734 fs_initcall(trace_events_user_init);