// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
/* Reminder to move to uapi when everything works */
#ifdef CONFIG_COMPILE_TEST
#include <linux/user_events.h>
#else
#include <uapi/linux/user_events.h>
#endif
#include "trace.h"
#include "trace_dynevent.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/*
 * Limits how many trace_event calls user processes can create:
 * Must be a power-of-two multiple of PAGE_SIZE.
 */
#define MAX_PAGE_ORDER 0
#define MAX_PAGES (1 << MAX_PAGE_ORDER)
#define MAX_BYTES (MAX_PAGES * PAGE_SIZE)
#define MAX_EVENTS (MAX_BYTES * 8)

/* Limit how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024

/*
 * The MAP_STATUS_* macros are used for taking an index and determining the
 * appropriate byte and the bit in that byte to set/reset for an event.
 *
 * The lower 3 bits of the index decide which bit to set.
 * The remaining upper bits of the index decide which byte to use for the bit.
 *
 * This is used when a probe is attached to or removed from an event, to
 * reflect the event's live tracing status to user programs via shared
 * memory maps.
 */
#define MAP_STATUS_BYTE(index) ((index) >> 3)
#define MAP_STATUS_MASK(index) BIT((index) & 7)
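
/*
 * Worked example (follows directly from the macros above): for index 10,
 * MAP_STATUS_BYTE(10) == 10 >> 3 == 1 and MAP_STATUS_MASK(10) ==
 * BIT(10 & 7) == BIT(2), so the live status of event 10 is bit 2 of
 * byte 1 in the shared status page.
 */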

/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)

/*
 * Stores the pages, tables, and locks for a group of events.
 * Each logical grouping of events has its own group, with a
 * matching page for status checks within user programs. This
 * allows for isolation of events to user programs by various
 * means.
 */
struct user_event_group {
	struct page *pages;
	char *register_page_data;
	char *system_name;
	struct hlist_node node;
	struct mutex reg_mutex;
	DECLARE_HASHTABLE(register_table, 8);
	DECLARE_BITMAP(page_bitmap, MAX_EVENTS);
};

/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/*
 * Stores per-event properties. As users register events
 * within a file, a user_event might be created if it does not
 * already exist. These are globally used and their lifetime
 * is tied to the refcnt member. These cannot go away until the
 * refcnt reaches one.
 */
struct user_event {
	struct user_event_group *group;
	struct tracepoint tracepoint;
	struct trace_event_call call;
	struct trace_event_class class;
	struct dyn_event devent;
	struct hlist_node node;
	struct list_head fields;
	struct list_head validators;
	refcount_t refcnt;
	int index;
	int flags;
	int min_size;
	char status;
};

/*
 * Stores per-file event references. As users register events
 * within a file, this structure is modified and freed via RCU.
 * The lifetime of this struct is tied to the lifetime of the file.
 * These are not shared and only accessible by the file that created it.
 */
struct user_event_refs {
	struct rcu_head rcu;
	int count;
	struct user_event *events[];
};

struct user_event_file_info {
	struct user_event_group *group;
	struct user_event_refs *refs;
};

#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
	struct list_head link;
	int offset;
	int flags;
};

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
				   void *tpdata, bool *faulted);

static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser);

static u32 user_event_key(char *name)
{
	return jhash(name, strlen(name), 0);
}

static void set_page_reservations(char *pages, bool set)
{
	int page;

	for (page = 0; page < MAX_PAGES; ++page) {
		void *addr = pages + (PAGE_SIZE * page);

		if (set)
			SetPageReserved(virt_to_page(addr));
		else
			ClearPageReserved(virt_to_page(addr));
	}
}
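
/*
 * The status pages are marked reserved because they are later handed to
 * user space with remap_pfn_range() in user_status_mmap() below;
 * PageReserved is the conventional way to keep normally-managed kernel
 * pages safe for that kind of mapping.
 */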

static void user_event_group_destroy(struct user_event_group *group)
{
	if (group->register_page_data)
		set_page_reservations(group->register_page_data, false);

	if (group->pages)
		__free_pages(group->pages, MAX_PAGE_ORDER);

	kfree(group->system_name);
	kfree(group);
}

static char *user_event_group_system_name(struct user_namespace *user_ns)
{
	char *system_name;
	int len = sizeof(USER_EVENTS_SYSTEM) + 1;

	if (user_ns != &init_user_ns) {
		/*
		 * Unexpected at this point:
		 * We only currently support init_user_ns.
		 * When we enable more, this will trigger a failure so log.
		 */
		pr_warn("user_events: Namespace other than init_user_ns!\n");
		return NULL;
	}

	system_name = kmalloc(len, GFP_KERNEL);

	if (!system_name)
		return NULL;

	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

	return system_name;
}

static inline struct user_event_group
*user_event_group_from_user_ns(struct user_namespace *user_ns)
{
	if (user_ns == &init_user_ns)
		return init_group;

	return NULL;
}

static struct user_event_group *current_user_event_group(void)
{
	struct user_namespace *user_ns = current_user_ns();
	struct user_event_group *group = NULL;

	while (user_ns) {
		group = user_event_group_from_user_ns(user_ns);

		if (group)
			break;

		user_ns = user_ns->parent;
	}

	return group;
}

static struct user_event_group
*user_event_group_create(struct user_namespace *user_ns)
{
	struct user_event_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);

	if (!group)
		return NULL;

	group->system_name = user_event_group_system_name(user_ns);

	if (!group->system_name)
		goto error;

	group->pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, MAX_PAGE_ORDER);

	if (!group->pages)
		goto error;

	group->register_page_data = page_address(group->pages);

	set_page_reservations(group->register_page_data, true);

	/* Zero all bits besides bit 0 (which is reserved for failures) */
	bitmap_zero(group->page_bitmap, MAX_EVENTS);
	set_bit(0, group->page_bitmap);

	mutex_init(&group->reg_mutex);
	hash_init(group->register_table);

	return group;
error:
	if (group)
		user_event_group_destroy(group);

	return NULL;
}

static __always_inline
void user_event_register_set(struct user_event *user)
{
	int i = user->index;

	user->group->register_page_data[MAP_STATUS_BYTE(i)] |= MAP_STATUS_MASK(i);
}

static __always_inline
void user_event_register_clear(struct user_event *user)
{
	int i = user->index;

	user->group->register_page_data[MAP_STATUS_BYTE(i)] &= ~MAP_STATUS_MASK(i);
}

static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
	return refcount_read(&user->refcnt) == 1;
}

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t ret;

	pagefault_disable();

	ret = copy_from_iter_nocache(addr, bytes, i);

	pagefault_enable();

	return ret;
}
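
/*
 * copy_nofault() runs with page faults disabled because the probe
 * callbacks below execute while a trace buffer reservation is held, where
 * sleeping to fault in user pages is not allowed. Callers pre-fault the
 * user buffer with fault_in_iov_iter_readable() and treat a short copy as
 * a fault, discarding the event; see user_events_write_core().
 */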

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
	struct user_event *user = (struct user_event *)call->data;

	return &user->fields;
}

/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example: an event named 'test' with a 20-char 'msg' field followed by an
 * unsigned int 'id' field:
 *	test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective, they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success, the returned user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(struct user_event_group *group,
				char *raw_command, struct user_event **newuser)
{
	char *name = raw_command;
	char *args = strpbrk(name, " ");
	char *flags;

	if (args)
		*args++ = '\0';

	flags = strpbrk(name, ":");

	if (flags)
		*flags++ = '\0';

	return user_event_parse(group, name, args, flags, newuser);
}
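
/*
 * A second illustrative command, using a dynamic (relative-offset) string
 * instead of a fixed-size array; both forms are accepted by
 * user_field_size() below:
 *	test2 __rel_loc char[] msg;u32 id
 */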

static int user_field_array_size(const char *type)
{
	const char *start = strchr(type, '[');
	char val[8];
	char *bracket;
	int size = 0;

	if (start == NULL)
		return -EINVAL;

	if (strscpy(val, start + 1, sizeof(val)) <= 0)
		return -EINVAL;

	bracket = strchr(val, ']');

	if (!bracket)
		return -EINVAL;

	*bracket = '\0';

	if (kstrtouint(val, 0, &size))
		return -EINVAL;

	if (size > MAX_FIELD_ARRAY_SIZE)
		return -EINVAL;

	return size;
}
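
/*
 * Worked example: for type "char[20]", start points at '[', val becomes
 * "20]", the ']' is replaced with '\0', and kstrtouint() yields 20, which
 * is within MAX_FIELD_ARRAY_SIZE and is returned as the array size.
 */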

static int user_field_size(const char *type)
{
	/* long is not allowed from a user, since it's ambiguous in size */
	if (strcmp(type, "s64") == 0)
		return sizeof(s64);
	if (strcmp(type, "u64") == 0)
		return sizeof(u64);
	if (strcmp(type, "s32") == 0)
		return sizeof(s32);
	if (strcmp(type, "u32") == 0)
		return sizeof(u32);
	if (strcmp(type, "int") == 0)
		return sizeof(int);
	if (strcmp(type, "unsigned int") == 0)
		return sizeof(unsigned int);
	if (strcmp(type, "s16") == 0)
		return sizeof(s16);
	if (strcmp(type, "u16") == 0)
		return sizeof(u16);
	if (strcmp(type, "short") == 0)
		return sizeof(short);
	if (strcmp(type, "unsigned short") == 0)
		return sizeof(unsigned short);
	if (strcmp(type, "s8") == 0)
		return sizeof(s8);
	if (strcmp(type, "u8") == 0)
		return sizeof(u8);
	if (strcmp(type, "char") == 0)
		return sizeof(char);
	if (strcmp(type, "unsigned char") == 0)
		return sizeof(unsigned char);
	if (str_has_prefix(type, "char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "unsigned char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "__data_loc "))
		return sizeof(u32);
	if (str_has_prefix(type, "__rel_loc "))
		return sizeof(u32);

	/* Unknown basic type, error */
	return -EINVAL;
}

static void user_event_destroy_validators(struct user_event *user)
{
	struct user_event_validator *validator, *next;
	struct list_head *head = &user->validators;

	list_for_each_entry_safe(validator, next, head, link) {
		list_del(&validator->link);
		kfree(validator);
	}
}

static void user_event_destroy_fields(struct user_event *user)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;

	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field);
	}
}

static int user_event_add_field(struct user_event *user, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct user_event_validator *validator;
	struct ftrace_event_field *field;
	int validator_flags = 0;

	field = kmalloc(sizeof(*field), GFP_KERNEL);

	if (!field)
		return -ENOMEM;

	if (str_has_prefix(type, "__data_loc "))
		goto add_validator;

	if (str_has_prefix(type, "__rel_loc ")) {
		validator_flags |= VALIDATOR_REL;
		goto add_validator;
	}

	goto add_field;

add_validator:
	if (strstr(type, "char") != NULL)
		validator_flags |= VALIDATOR_ENSURE_NULL;

	validator = kmalloc(sizeof(*validator), GFP_KERNEL);

	if (!validator) {
		kfree(field);
		return -ENOMEM;
	}

	validator->flags = validator_flags;
	validator->offset = offset;

	/* Want sequential access when validating */
	list_add_tail(&validator->link, &user->validators);

add_field:
	field->type = type;
	field->name = name;
	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->filter_type = filter_type;

	list_add(&field->link, &user->fields);

	/*
	 * Min size from user writes that are required; this does not include
	 * the size of trace_entry (common fields).
	 */
	user->min_size = (offset + size) - sizeof(struct trace_entry);

	return 0;
}

/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
				  u32 *offset)
{
	char *part, *type, *name;
	u32 depth = 0, saved_offset = *offset;
	int len, size = -EINVAL;
	bool is_struct = false;

	field = skip_spaces(field);

	if (*field == '\0')
		return 0;

	/* Handle types that have a space within */
	len = str_has_prefix(field, "unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "struct ");
	if (len) {
		is_struct = true;
		goto skip_next;
	}

	len = str_has_prefix(field, "__data_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__data_loc ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc ");
	if (len)
		goto skip_next;

	goto parse;
skip_next:
	type = field;
	field = strpbrk(field + len, " ");

	if (field == NULL)
		return -EINVAL;

	*field++ = '\0';
	depth++;
parse:
	name = NULL;

	while ((part = strsep(&field, " ")) != NULL) {
		switch (depth++) {
		case FIELD_DEPTH_TYPE:
			type = part;
			break;
		case FIELD_DEPTH_NAME:
			name = part;
			break;
		case FIELD_DEPTH_SIZE:
			if (!is_struct)
				return -EINVAL;

			if (kstrtou32(part, 10, &size))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (depth < FIELD_DEPTH_SIZE || !name)
		return -EINVAL;

	if (depth == FIELD_DEPTH_SIZE)
		size = user_field_size(type);

	if (size == 0)
		return -EINVAL;

	if (size < 0)
		return size;

	*offset = saved_offset + size;

	return user_event_add_field(user, type, name, saved_offset, size,
				    type[0] != 'u', FILTER_OTHER);
}
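
/*
 * Worked example: parsing "struct mystruct payload 32" (a hypothetical
 * field description) matches the "struct " prefix, so type becomes
 * "struct mystruct" via skip_next, "payload" is consumed at
 * FIELD_DEPTH_NAME, and "32" is parsed at FIELD_DEPTH_SIZE as the explicit
 * size that struct types require. A basic type such as "u32 id" instead
 * ends the walk at FIELD_DEPTH_SIZE and takes its size from
 * user_field_size().
 */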

static int user_event_parse_fields(struct user_event *user, char *args)
{
	char *field;
	u32 offset = sizeof(struct trace_entry);
	int ret = -EINVAL;

	if (args == NULL)
		return 0;

	while ((field = strsep(&args, ";")) != NULL) {
		ret = user_event_parse_field(field, user, &offset);

		if (ret)
			break;
	}

	return ret;
}

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
	if (strcmp(type, "s64") == 0)
		return "%lld";
	if (strcmp(type, "u64") == 0)
		return "%llu";
	if (strcmp(type, "s32") == 0)
		return "%d";
	if (strcmp(type, "u32") == 0)
		return "%u";
	if (strcmp(type, "int") == 0)
		return "%d";
	if (strcmp(type, "unsigned int") == 0)
		return "%u";
	if (strcmp(type, "s16") == 0)
		return "%d";
	if (strcmp(type, "u16") == 0)
		return "%u";
	if (strcmp(type, "short") == 0)
		return "%d";
	if (strcmp(type, "unsigned short") == 0)
		return "%u";
	if (strcmp(type, "s8") == 0)
		return "%d";
	if (strcmp(type, "u8") == 0)
		return "%u";
	if (strcmp(type, "char") == 0)
		return "%d";
	if (strcmp(type, "unsigned char") == 0)
		return "%u";
	if (strstr(type, "char[") != NULL)
		return "%s";

	/* Unknown, likely a struct; allowed, treat as 64-bit */
	return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
	if (str_has_prefix(type, "__data_loc ")) {
		*str_func = "__get_str";
		goto check;
	}

	if (str_has_prefix(type, "__rel_loc ")) {
		*str_func = "__get_rel_str";
		goto check;
	}

	return false;
check:
	return strstr(type, "char") != NULL;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
				     char *buf, int len, bool *colon)
{
	int pos = 0, i = *iout;

	*colon = false;

	for (; i < argc; ++i) {
		if (i != *iout)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

		if (strchr(argv[i], ';')) {
			++i;
			*colon = true;
			break;
		}
	}

	/* Actual set, advance i */
	if (len != 0)
		*iout = i;

	return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
				 char *buf, int len, bool colon)
{
	int pos = 0;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

	if (colon)
		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

	return pos + 1;
}

static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int pos = 0, depth = 0;
	const char *str_func;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth != 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
				field->name, user_field_format(field->type));

		depth++;
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (user_field_is_dyn_string(field->type, &str_func))
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", %s(%s)", str_func, field->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", field->name);
	}

	return pos + 1;
}
#undef LEN_OR_ZERO
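
/*
 * Worked example: for the event "test char[20] msg;unsigned int id" from
 * the parsing comment above, the generated print_fmt is:
 *	"msg=%s id=%u", REC->msg, REC->id
 * A __data_loc/__rel_loc char field would instead be printed through
 * __get_str()/__get_rel_str().
 */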

static int user_event_create_print_fmt(struct user_event *user)
{
	char *print_fmt;
	int len;

	len = user_event_set_print_fmt(user, NULL, 0);

	print_fmt = kmalloc(len, GFP_KERNEL);

	if (!print_fmt)
		return -ENOMEM;

	user_event_set_print_fmt(user, print_fmt, len);

	user->call.print_fmt = print_fmt;

	return 0;
}

static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	/* Unsafe to try to decode user provided print_fmt, use hex */
	trace_print_hex_dump_seq(&iter->seq, "", DUMP_PREFIX_OFFSET, 16,
				 1, iter->ent, iter->ent_size, true);

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions user_event_funcs = {
	.trace = user_event_print_trace,
};
089331d4
BB
784static int user_event_set_call_visible(struct user_event *user, bool visible)
785{
786 int ret;
787 const struct cred *old_cred;
788 struct cred *cred;
789
790 cred = prepare_creds();
791
792 if (!cred)
793 return -ENOMEM;
794
795 /*
796 * While by default tracefs is locked down, systems can be configured
797 * to allow user_event files to be less locked down. The extreme case
798 * being "other" has read/write access to user_events_data/status.
799 *
94c255ac 800 * When not locked down, processes may not have permissions to
089331d4
BB
801 * add/remove calls themselves to tracefs. We need to temporarily
802 * switch to root file permission to allow for this scenario.
803 */
804 cred->fsuid = GLOBAL_ROOT_UID;
805
806 old_cred = override_creds(cred);
807
808 if (visible)
809 ret = trace_add_event_call(&user->call);
810 else
811 ret = trace_remove_event_call(&user->call);
812
813 revert_creds(old_cred);
814 put_cred(cred);
815
816 return ret;
817}

static int destroy_user_event(struct user_event *user)
{
	int ret = 0;

	/* Must destroy fields before call removal */
	user_event_destroy_fields(user);

	ret = user_event_set_call_visible(user, false);

	if (ret)
		return ret;

	dyn_event_remove(&user->devent);

	user_event_register_clear(user);
	clear_bit(user->index, user->group->page_bitmap);
	hash_del(&user->node);

	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(EVENT_NAME(user));
	kfree(user);

	return ret;
}

static struct user_event *find_user_event(struct user_event_group *group,
					  char *name, u32 *outkey)
{
	struct user_event *user;
	u32 key = user_event_key(name);

	*outkey = key;

	hash_for_each_possible(group->register_table, user, node, key)
		if (!strcmp(EVENT_NAME(user), name)) {
			refcount_inc(&user->refcnt);
			return user;
		}

	return NULL;
}

static int user_event_validate(struct user_event *user, void *data, int len)
{
	struct list_head *head = &user->validators;
	struct user_event_validator *validator;
	void *pos, *end = data + len;
	u32 loc, offset, size;

	list_for_each_entry(validator, head, link) {
		pos = data + validator->offset;

		/* Already done min_size check, no bounds check here */
		loc = *(u32 *)pos;
		offset = loc & 0xffff;
		size = loc >> 16;

		if (likely(validator->flags & VALIDATOR_REL))
			pos += offset + sizeof(loc);
		else
			pos = data + offset;

		pos += size;

		if (unlikely(pos > end))
			return -EFAULT;

		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
			if (unlikely(*(char *)(pos - 1) != '\0'))
				return -EFAULT;
	}

	return 0;
}
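
/*
 * Worked example: a dynamic string of length 6 is encoded as
 * loc = (6 << 16) | offset. For a __rel_loc field the offset is relative
 * to the end of the loc word itself (pos += offset + sizeof(loc)); for a
 * __data_loc field it is relative to the start of the record
 * (pos = data + offset). Either way the data must end inside the record
 * and, for char data, be NUL-terminated.
 */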

/*
 * Writes the user supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
			      void *tpdata, bool *faulted)
{
	struct trace_event_file *file;
	struct trace_entry *entry;
	struct trace_event_buffer event_buffer;
	size_t size = sizeof(*entry) + i->count;

	file = (struct trace_event_file *)tpdata;

	if (!file ||
	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file))
		return;

	/* Allocates and fills trace_entry; entry + 1 is the data payload */
	entry = trace_event_buffer_reserve(&event_buffer, file, size);

	if (unlikely(!entry))
		return;

	if (unlikely(!copy_nofault(entry + 1, i->count, i)))
		goto discard;

	if (!list_empty(&user->validators) &&
	    unlikely(user_event_validate(user, entry, size)))
		goto discard;

	trace_event_buffer_commit(&event_buffer);

	return;
discard:
	*faulted = true;
	__trace_event_discard_commit(event_buffer.buffer,
				     event_buffer.event);
}

#ifdef CONFIG_PERF_EVENTS
/*
 * Writes the user supplied payload out to perf ring buffer.
 */
static void user_event_perf(struct user_event *user, struct iov_iter *i,
			    void *tpdata, bool *faulted)
{
	struct hlist_head *perf_head;

	perf_head = this_cpu_ptr(user->call.perf_events);

	if (perf_head && !hlist_empty(perf_head)) {
		struct trace_entry *perf_entry;
		struct pt_regs *regs;
		size_t size = sizeof(*perf_entry) + i->count;
		int context;

		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
						  &regs, &context);

		if (unlikely(!perf_entry))
			return;

		perf_fetch_caller_regs(regs);

		if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
			goto discard;

		if (!list_empty(&user->validators) &&
		    unlikely(user_event_validate(user, perf_entry, size)))
			goto discard;

		perf_trace_buf_submit(perf_entry, size, context,
				      user->call.event.type, 1, regs,
				      perf_head, NULL);

		return;
discard:
		*faulted = true;
		perf_swevent_put_recursion_context(context);
	}
}
#endif

/*
 * Update the register page that is shared between user processes.
 */
static void update_reg_page_for(struct user_event *user)
{
	struct tracepoint *tp = &user->tracepoint;
	char status = 0;

	if (atomic_read(&tp->key.enabled) > 0) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;

				if (probe_func == user_event_ftrace)
					status |= EVENT_STATUS_FTRACE;
#ifdef CONFIG_PERF_EVENTS
				else if (probe_func == user_event_perf)
					status |= EVENT_STATUS_PERF;
#endif
				else
					status |= EVENT_STATUS_OTHER;
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();
	}

	if (status)
		user_event_register_set(user);
	else
		user_event_register_clear(user);

	user->status = status;
}

/*
 * Register callback for our events from tracing sub-systems.
 */
static int user_event_reg(struct trace_event_call *call,
			  enum trace_reg type,
			  void *data)
{
	struct user_event *user = (struct user_event *)call->data;
	int ret = 0;

	if (!user)
		return -ENOENT;

	switch (type) {
	case TRACE_REG_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    data);
		goto dec;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->perf_probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    data);
		goto dec;

	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		break;
#endif
	}

	return ret;
inc:
	refcount_inc(&user->refcnt);
	update_reg_page_for(user);
	return 0;
dec:
	update_reg_page_for(user);
	refcount_dec(&user->refcnt);
	return 0;
}

static int user_event_create(const char *raw_command)
{
	struct user_event_group *group;
	struct user_event *user;
	char *name;
	int ret;

	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
		return -ECANCELED;

	raw_command += USER_EVENTS_PREFIX_LEN;
	raw_command = skip_spaces(raw_command);

	name = kstrdup(raw_command, GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	group = current_user_event_group();

	if (!group) {
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&group->reg_mutex);

	ret = user_event_parse_cmd(group, name, &user);

	if (!ret)
		refcount_dec(&user->refcnt);

	mutex_unlock(&group->reg_mutex);

	if (ret)
		kfree(name);

	return ret;
}

static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	struct ftrace_event_field *field, *next;
	struct list_head *head;
	int depth = 0;

	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));

	head = trace_get_fields(&user->call);

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth == 0)
			seq_puts(m, " ");
		else
			seq_puts(m, "; ");

		seq_printf(m, "%s %s", field->type, field->name);

		if (str_has_prefix(field->type, "struct "))
			seq_printf(m, " %d", field->size);

		depth++;
	}

	seq_puts(m, "\n");

	return 0;
}

static bool user_event_is_busy(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	return !user_event_last_ref(user);
}

static int user_event_free(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	if (!user_event_last_ref(user))
		return -EBUSY;

	return destroy_user_event(user);
}

static bool user_field_match(struct ftrace_event_field *field, int argc,
			     const char **argv, int *iout)
{
	char *field_name = NULL, *dyn_field_name = NULL;
	bool colon = false, match = false;
	int dyn_len, len;

	if (*iout >= argc)
		return false;

	dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
					    0, &colon);

	len = user_field_set_string(field, field_name, 0, colon);

	if (dyn_len != len)
		return false;

	dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
	field_name = kmalloc(len, GFP_KERNEL);

	if (!dyn_field_name || !field_name)
		goto out;

	user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
				  dyn_len, &colon);

	user_field_set_string(field, field_name, len, colon);

	match = strcmp(dyn_field_name, field_name) == 0;
out:
	kfree(dyn_field_name);
	kfree(field_name);

	return match;
}

static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int i = 0;

	list_for_each_entry_safe_reverse(field, next, head, link)
		if (!user_field_match(field, argc, argv, &i))
			return false;

	if (i != argc)
		return false;

	return true;
}

static bool user_event_match(const char *system, const char *event,
			     int argc, const char **argv, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	bool match;

	match = strcmp(EVENT_NAME(user), event) == 0 &&
		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);

	if (match && argc > 0)
		match = user_fields_match(user, argc, argv);

	return match;
}

static struct dyn_event_operations user_event_dops = {
	.create = user_event_create,
	.show = user_event_show,
	.is_busy = user_event_is_busy,
	.free = user_event_free,
	.match = user_event_match,
};

static int user_event_trace_register(struct user_event *user)
{
	int ret;

	ret = register_trace_event(&user->call.event);

	if (!ret)
		return -ENODEV;

	ret = user_event_set_call_visible(user, true);

	if (ret)
		unregister_trace_event(&user->call.event);

	return ret;
}

/*
 * Parses the event name, arguments and flags then registers if successful.
 * The name buffer lifetime is owned by this method for success cases only.
 * Upon success the returned user_event has its ref count increased by 1.
 */
static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser)
{
	int ret;
	int index;
	u32 key;
	struct user_event *user;

	/* Prevent dyn_event from racing */
	mutex_lock(&event_mutex);
	user = find_user_event(group, name, &key);
	mutex_unlock(&event_mutex);

	if (user) {
		*newuser = user;
		/*
		 * Name is allocated by caller, free it since it already exists.
		 * Caller only worries about failure cases for freeing.
		 */
		kfree(name);
		return 0;
	}

	index = find_first_zero_bit(group->page_bitmap, MAX_EVENTS);

	if (index == MAX_EVENTS)
		return -EMFILE;

	user = kzalloc(sizeof(*user), GFP_KERNEL);

	if (!user)
		return -ENOMEM;

	INIT_LIST_HEAD(&user->class.fields);
	INIT_LIST_HEAD(&user->fields);
	INIT_LIST_HEAD(&user->validators);

	user->group = group;
	user->tracepoint.name = name;

	ret = user_event_parse_fields(user, args);

	if (ret)
		goto put_user;

	ret = user_event_create_print_fmt(user);

	if (ret)
		goto put_user;

	user->call.data = user;
	user->call.class = &user->class;
	user->call.name = name;
	user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
	user->call.tp = &user->tracepoint;
	user->call.event.funcs = &user_event_funcs;
	user->class.system = group->system_name;

	user->class.fields_array = user_event_fields_array;
	user->class.get_fields = user_event_get_fields;
	user->class.reg = user_event_reg;
	user->class.probe = user_event_ftrace;
#ifdef CONFIG_PERF_EVENTS
	user->class.perf_probe = user_event_perf;
#endif

	mutex_lock(&event_mutex);

	ret = user_event_trace_register(user);

	if (ret)
		goto put_user_lock;

	user->index = index;

	/* Ensure we track self ref and caller ref (2) */
	refcount_set(&user->refcnt, 2);

	dyn_event_init(&user->devent, &user_event_dops);
	dyn_event_add(&user->devent, &user->call);
	set_bit(user->index, group->page_bitmap);
	hash_add(group->register_table, &user->node, key);

	mutex_unlock(&event_mutex);

	*newuser = user;
	return 0;
put_user_lock:
	mutex_unlock(&event_mutex);
put_user:
	user_event_destroy_fields(user);
	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(user);
	return ret;
}

/*
 * Deletes a previously created event if it is no longer being used.
 */
static int delete_user_event(struct user_event_group *group, char *name)
{
	u32 key;
	struct user_event *user = find_user_event(group, name, &key);

	if (!user)
		return -ENOENT;

	refcount_dec(&user->refcnt);

	if (!user_event_last_ref(user))
		return -EBUSY;

	return destroy_user_event(user);
}

/*
 * Validates the user payload and writes via iterator.
 */
static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_refs *refs;
	struct user_event *user = NULL;
	struct tracepoint *tp;
	ssize_t ret = i->count;
	int idx;

	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
		return -EFAULT;

	rcu_read_lock_sched();

	refs = rcu_dereference_sched(info->refs);

	/*
	 * The refs->events array is protected by RCU, and new items may be
	 * added. But the user retrieved from indexing into the events array
	 * shall be immutable while the file is opened.
	 */
	if (likely(refs && idx < refs->count))
		user = refs->events[idx];

	rcu_read_unlock_sched();

	if (unlikely(user == NULL))
		return -ENOENT;

	if (unlikely(i->count < user->min_size))
		return -EINVAL;

	tp = &user->tracepoint;

	/*
	 * It's possible key.enabled gets disabled after this check, however
	 * we don't mind if a few events are included in this condition.
	 */
	if (likely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;
		struct iov_iter copy;
		void *tpdata;
		bool faulted;

		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
			return -EFAULT;

		faulted = false;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				copy = *i;
				probe_func = probe_func_ptr->func;
				tpdata = probe_func_ptr->data;
				probe_func(user, &copy, tpdata, &faulted);
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();

		if (unlikely(faulted))
			return -EFAULT;
	}

	return ret;
}
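
/*
 * Wire format recap: each write() to user_events_data starts with the int
 * write_index returned from DIAG_IOCSREG, immediately followed by the
 * field payload in registration order. For the
 * "test char[20] msg;unsigned int id" example above, that is 4 bytes of
 * index, 20 bytes of msg, then 4 bytes of id.
 */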

static int user_events_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	struct user_event_file_info *info;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;

	info->group = group;

	file->private_data = info;

	return 0;
}

static ssize_t user_events_write(struct file *file, const char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov;
	struct iov_iter i;

	if (unlikely(*ppos != 0))
		return -EFAULT;

	if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
					 count, &iov, &i)))
		return -EFAULT;

	return user_events_write_core(file, &i);
}

static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
{
	return user_events_write_core(kp->ki_filp, i);
}

static int user_events_ref_add(struct user_event_file_info *info,
			       struct user_event *user)
{
	struct user_event_group *group = info->group;
	struct user_event_refs *refs, *new_refs;
	int i, size, count = 0;

	refs = rcu_dereference_protected(info->refs,
					 lockdep_is_held(&group->reg_mutex));

	if (refs) {
		count = refs->count;

		for (i = 0; i < count; ++i)
			if (refs->events[i] == user)
				return i;
	}

	size = struct_size(refs, events, count + 1);

	new_refs = kzalloc(size, GFP_KERNEL);

	if (!new_refs)
		return -ENOMEM;

	new_refs->count = count + 1;

	for (i = 0; i < count; ++i)
		new_refs->events[i] = refs->events[i];

	new_refs->events[i] = user;

	refcount_inc(&user->refcnt);

	rcu_assign_pointer(info->refs, new_refs);

	if (refs)
		kfree_rcu(refs, rcu);

	return i;
}
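
/*
 * The refs array grows copy-on-write: a new array one slot larger is
 * published with rcu_assign_pointer() and the old one reclaimed with
 * kfree_rcu(), so lockless readers in user_events_write_core() always see
 * a consistent array while writers serialize on group->reg_mutex.
 */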

static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	if (size < offsetofend(struct user_reg, write_index))
		return -EINVAL;

	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

	if (ret)
		return ret;

	kreg->size = size;

	return 0;
}
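
/*
 * The self-describing size field gives the ABI room to grow:
 * copy_struct_from_user() zero-fills kreg when an older caller passes a
 * smaller struct, and rejects a larger struct whose trailing bytes are
 * non-zero, so both older and newer user binaries keep working.
 */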

/*
 * Registers a user_event on behalf of a user process.
 */
static long user_events_ioctl_reg(struct user_event_file_info *info,
				  unsigned long uarg)
{
	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
	struct user_reg reg;
	struct user_event *user;
	char *name;
	long ret;

	ret = user_reg_get(ureg, &reg);

	if (ret)
		return ret;

	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
			    MAX_EVENT_DESC);

	if (IS_ERR(name)) {
		ret = PTR_ERR(name);
		return ret;
	}

	ret = user_event_parse_cmd(info->group, name, &user);

	if (ret) {
		kfree(name);
		return ret;
	}

	ret = user_events_ref_add(info, user);

	/* No longer need parse ref, ref_add either worked or not */
	refcount_dec(&user->refcnt);

	/* Positive number is index and valid */
	if (ret < 0)
		return ret;

	put_user((u32)ret, &ureg->write_index);
	put_user(user->index, &ureg->status_bit);

	return 0;
}
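
/*
 * Illustrative user-space flow (a sketch, not part of this file; the
 * tracefs mount point is assumed to be /sys/kernel/tracing):
 *
 *	struct user_reg reg = { .size = sizeof(reg) };
 *	int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *
 *	reg.name_args = (__u64)"test char[20] msg;unsigned int id";
 *	ioctl(fd, DIAG_IOCSREG, &reg);
 *
 * After the ioctl, reg.write_index prefixes every write() payload and
 * reg.status_bit is the bit to poll in the mmap'd status page below.
 */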

/*
 * Deletes a user_event on behalf of a user process.
 */
static long user_events_ioctl_del(struct user_event_file_info *info,
				  unsigned long uarg)
{
	void __user *ubuf = (void __user *)uarg;
	char *name;
	long ret;

	name = strndup_user(ubuf, MAX_EVENT_DESC);

	if (IS_ERR(name))
		return PTR_ERR(name);

	/* event_mutex prevents dyn_event from racing */
	mutex_lock(&event_mutex);
	ret = delete_user_event(info->group, name);
	mutex_unlock(&event_mutex);

	kfree(name);

	return ret;
}

/*
 * Handles the ioctl from user mode to register or alter operations.
 */
static long user_events_ioctl(struct file *file, unsigned int cmd,
			      unsigned long uarg)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_group *group = info->group;
	long ret = -ENOTTY;

	switch (cmd) {
	case DIAG_IOCSREG:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_reg(info, uarg);
		mutex_unlock(&group->reg_mutex);
		break;

	case DIAG_IOCSDEL:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_del(info, uarg);
		mutex_unlock(&group->reg_mutex);
		break;
	}

	return ret;
}

/*
 * Handles the final close of the file from user mode.
 */
static int user_events_release(struct inode *node, struct file *file)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_group *group;
	struct user_event_refs *refs;
	struct user_event *user;
	int i;

	if (!info)
		return -EINVAL;

	group = info->group;

	/*
	 * Ensure refs cannot change under any situation by taking the
	 * register mutex during the final freeing of the references.
	 */
	mutex_lock(&group->reg_mutex);

	refs = info->refs;

	if (!refs)
		goto out;

	/*
	 * The lifetime of refs has reached an end, it's tied to this file.
	 * The underlying user_events are ref counted, and cannot be freed.
	 * After this decrement, the user_events may be freed elsewhere.
	 */
	for (i = 0; i < refs->count; ++i) {
		user = refs->events[i];

		if (user)
			refcount_dec(&user->refcnt);
	}
out:
	file->private_data = NULL;

	mutex_unlock(&group->reg_mutex);

	kfree(refs);
	kfree(info);

	return 0;
}

static const struct file_operations user_data_fops = {
	.open = user_events_open,
	.write = user_events_write,
	.write_iter = user_events_write_iter,
	.unlocked_ioctl = user_events_ioctl,
	.release = user_events_release,
};

static struct user_event_group *user_status_group(struct file *file)
{
	struct seq_file *m = file->private_data;

	if (!m)
		return NULL;

	return m->private;
}

/*
 * Maps the shared page into the user process for checking if event is enabled.
 */
static int user_status_mmap(struct file *file, struct vm_area_struct *vma)
{
	char *pages;
	struct user_event_group *group = user_status_group(file);
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size != MAX_BYTES)
		return -EINVAL;

	if (!group)
		return -EINVAL;

	pages = group->register_page_data;

	return remap_pfn_range(vma, vma->vm_start,
			       virt_to_phys(pages) >> PAGE_SHIFT,
			       size, vm_get_page_prot(VM_READ));
}
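
/*
 * Illustrative user-space check (a sketch; 'page' is the mmap'd status
 * page and 'bit' is the status_bit returned by DIAG_IOCSREG):
 *
 *	if (page[bit >> 3] & (1 << (bit & 7)))
 *		write(fd, payload, payload_len); // event is being traced
 *
 * This mirrors MAP_STATUS_BYTE()/MAP_STATUS_MASK() above from the
 * reader's side.
 */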

static void *user_seq_start(struct seq_file *m, loff_t *pos)
{
	if (*pos)
		return NULL;

	return (void *)1;
}

static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void user_seq_stop(struct seq_file *m, void *p)
{
}

static int user_seq_show(struct seq_file *m, void *p)
{
	struct user_event_group *group = m->private;
	struct user_event *user;
	char status;
	int i, active = 0, busy = 0, flags;

	if (!group)
		return -EINVAL;

	mutex_lock(&group->reg_mutex);

	hash_for_each(group->register_table, i, user, node) {
		status = user->status;
		flags = user->flags;

		seq_printf(m, "%d:%s", user->index, EVENT_NAME(user));

		if (flags != 0 || status != 0)
			seq_puts(m, " #");

		if (status != 0) {
			seq_puts(m, " Used by");
			if (status & EVENT_STATUS_FTRACE)
				seq_puts(m, " ftrace");
			if (status & EVENT_STATUS_PERF)
				seq_puts(m, " perf");
			if (status & EVENT_STATUS_OTHER)
				seq_puts(m, " other");
			busy++;
		}

		seq_puts(m, "\n");
		active++;
	}

	mutex_unlock(&group->reg_mutex);

	seq_puts(m, "\n");
	seq_printf(m, "Active: %d\n", active);
	seq_printf(m, "Busy: %d\n", busy);
	seq_printf(m, "Max: %ld\n", MAX_EVENTS);

	return 0;
}

static const struct seq_operations user_seq_ops = {
	.start = user_seq_start,
	.next = user_seq_next,
	.stop = user_seq_stop,
	.show = user_seq_show,
};

static int user_status_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	int ret;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	ret = seq_open(file, &user_seq_ops);

	if (!ret) {
		/* Chain group to seq_file */
		struct seq_file *m = file->private_data;

		m->private = group;
	}

	return ret;
}

static const struct file_operations user_status_fops = {
	.open = user_status_open,
	.mmap = user_status_mmap,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/*
 * Creates a set of tracefs files to allow user mode interactions.
 */
static int create_user_tracefs(void)
{
	struct dentry *edata, *emmap;

	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
				    NULL, NULL, &user_data_fops);

	if (!edata) {
		pr_warn("Could not create tracefs 'user_events_data' entry\n");
		goto err;
	}

	/* mmap with MAP_SHARED requires writable fd */
	emmap = tracefs_create_file("user_events_status", TRACE_MODE_WRITE,
				    NULL, NULL, &user_status_fops);

	if (!emmap) {
		tracefs_remove(edata);
		pr_warn("Could not create tracefs 'user_events_status' entry\n");
		goto err;
	}

	return 0;
err:
	return -ENODEV;
}

static int __init trace_events_user_init(void)
{
	int ret;

	init_group = user_event_group_create(&init_user_ns);

	if (!init_group)
		return -ENOMEM;

	ret = create_user_tracefs();

	if (ret) {
		pr_warn("user_events could not register with tracefs\n");
		user_event_group_destroy(init_group);
		init_group = NULL;
		return ret;
	}

	if (dyn_event_register(&user_event_dops))
		pr_warn("user_events could not register with dyn_events\n");

	return 0;
}

fs_initcall(trace_events_user_init);