uprobes/perf: Avoid uprobe_apply() whenever possible
author     Oleg Nesterov <oleg@redhat.com>
           Mon, 4 Feb 2013 18:05:43 +0000 (19:05 +0100)
committer  Oleg Nesterov <oleg@redhat.com>
           Fri, 8 Feb 2013 17:28:08 +0000 (18:28 +0100)
uprobe_perf_open/close call the costly uprobe_apply() every time; we can
avoid it if:

- "nr_systemwide != 0" is not changed.

- There is another process/thread with the same ->mm.

- copy_process() does inherit_event(). dup_mmap() preserves the
  inserted breakpoints.

- event->attr.enable_on_exec is set; we can rely on uprobe_mmap()
  called by the exec/mmap paths.

- tp_target is exiting. Only _close() checks PF_EXITING; I don't
  think TRACE_REG_PERF_OPEN can hit the dying task too often (see the
  register dispatch sketch below).

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
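
For context, uprobe_perf_open/close are not called directly by perf; they are
reached through the trace event's ->reg() callback when an event is attached
to or detached from the tracepoint. In this tree the dispatch looks roughly
like the sketch below (reconstructed for illustration, not part of this patch;
the other TRACE_REG_* cases are elided):

    static int
    trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
                          void *data)
    {
            struct trace_uprobe *tu = (struct trace_uprobe *)event->data;

            switch (type) {
            /* ... TRACE_REG_(UN)REGISTER, TRACE_REG_PERF_(UN)REGISTER ... */
            case TRACE_REG_PERF_OPEN:
                    /* data is the struct perf_event being added */
                    return uprobe_perf_open(tu, data);
            case TRACE_REG_PERF_CLOSE:
                    return uprobe_perf_close(tu, data);
            default:
                    return 0;
            }
    }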
kernel/trace/trace_uprobe.c

index 2399f14165554ee2cbf2f3af4de08098cc548547..8dad2a92dee9c9fff297d62edfe72147a1b371e4 100644 (file)
@@ -680,30 +680,60 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
        return false;
 }
 
+static inline bool
+uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+{
+       return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+}
+
 static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
 {
+       bool done;
+
        write_lock(&tu->filter.rwlock);
-       if (event->hw.tp_target)
+       if (event->hw.tp_target) {
+               /*
+                * event->parent != NULL means copy_process(), we can avoid
+                * uprobe_apply(). current->mm must be probed and we can rely
+                * on dup_mmap() which preserves the already installed bp's.
+                *
+                * attr.enable_on_exec means that exec/mmap will install the
+                * breakpoints we need.
+                */
+               done = tu->filter.nr_systemwide ||
+                       event->parent || event->attr.enable_on_exec ||
+                       uprobe_filter_event(tu, event);
                list_add(&event->hw.tp_list, &tu->filter.perf_events);
-       else
+       } else {
+               done = tu->filter.nr_systemwide;
                tu->filter.nr_systemwide++;
+       }
        write_unlock(&tu->filter.rwlock);
 
-       uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+       if (!done)
+               uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
 
        return 0;
 }
 
 static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
 {
+       bool done;
+
        write_lock(&tu->filter.rwlock);
-       if (event->hw.tp_target)
+       if (event->hw.tp_target) {
                list_del(&event->hw.tp_list);
-       else
+               done = tu->filter.nr_systemwide ||
+                       (event->hw.tp_target->flags & PF_EXITING) ||
+                       uprobe_filter_event(tu, event);
+       } else {
                tu->filter.nr_systemwide--;
+               done = tu->filter.nr_systemwide;
+       }
        write_unlock(&tu->filter.rwlock);
 
-       uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
+       if (!done)
+               uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
 
        return 0;
 }
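
The hunk starts at the tail of __uprobe_perf_filter(), the helper that the new
uprobe_filter_event() wraps but which the diff does not show in full. For
reference, in this tree it looks roughly like the following (reconstructed
sketch, not part of the patch):

    static bool
    __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
    {
            struct perf_event *event;

            if (filter->nr_systemwide)
                    return true;

            /* is some already-registered per-task event probing this mm? */
            list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
                    if (event->hw.tp_target->mm == mm)
                            return true;
            }

            return false;
    }

Note that uprobe_perf_open() computes "done" before the list_add(), so
uprobe_filter_event() only sees previously registered events; it answers "is
this ->mm already probed by another event", which is the "another
process/thread with the same ->mm" case from the changelog. Symmetrically,
uprobe_perf_close() does the list_del() first, so the event being removed does
not count as a reason to keep the breakpoints installed.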