sched/headers: Prepare for new header dependencies before moving code to <linux/sched...
[linux-2.6-block.git] kernel/events/core.c
index 77a932b54a64fbeb2640b35c1cc4c096994bf1d7..6f41548f2e320a98182f4fe4b10700bcab7e6b86 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -46,6 +46,8 @@
 #include <linux/filter.h>
 #include <linux/namei.h>
 #include <linux/parser.h>
+#include <linux/sched/clock.h>
+#include <linux/sched/mm.h>
 
 #include "internal.h"
 
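These two includes anticipate the <linux/sched.h> header split: the perf core takes its timestamps from the scheduler clock and manipulates mm references, and after the split those declarations live in <linux/sched/clock.h> and <linux/sched/mm.h> instead of being pulled in through <linux/sched.h>. A minimal sketch of the clock dependency, matching the in-tree perf_clock() helper (assumed unchanged from this era):

	#include <linux/sched/clock.h>

	/* perf timestamps come from the local (per-CPU) scheduler clock */
	static inline u64 perf_clock(void)
	{
		return local_clock();
	}
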
@@ -455,7 +457,7 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp,
                                loff_t *ppos)
 {
-       int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
        if (ret || !write)
                return ret;
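
proc_dointvec() ignores a ctl_table's extra1/extra2 bounds, so out-of-range writes to perf_cpu_time_max_percent previously reached the handler body; proc_dointvec_minmax() rejects them with -EINVAL before the handler's own logic runs. A sketch of the matching sysctl table entry, with the bound variables as assumed from kernel/sysctl.c of this era:

	static int zero		= 0;
	static int one_hundred	= 100;

	static struct ctl_table entry = {
		.procname	= "perf_cpu_time_max_percent",
		.data		= &sysctl_perf_cpu_time_max_percent,
		.maxlen		= sizeof(sysctl_perf_cpu_time_max_percent),
		.mode		= 0644,
		.proc_handler	= perf_cpu_time_max_percent_handler,
		.extra1		= &zero,	/* minmax: writes below 0 ... */
		.extra2		= &one_hundred,	/* ... or above 100 now fail */
	};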
@@ -3522,6 +3524,8 @@ static void perf_event_enable_on_exec(int ctxn)
        if (enabled) {
                clone_ctx = unclone_ctx(ctx);
                ctx_resched(cpuctx, ctx, event_type);
+       } else {
+               ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
        }
        perf_ctx_unlock(cpuctx, ctx);
 
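If exec enables nothing, ctx_resched() is never reached and the context's time bookkeeping would go stale; the new else branch schedules the context back in for time accounting only. EVENT_TIME is one bit of the event_type_t mask (definition as found in core.c of this era):

	enum event_type_t {
		EVENT_FLEXIBLE	= 0x1,
		EVENT_PINNED	= 0x2,
		EVENT_TIME	= 0x4,
		EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
	};
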
@@ -4925,9 +4929,9 @@ unlock:
        rcu_read_unlock();
 }
 
-static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int perf_mmap_fault(struct vm_fault *vmf)
 {
-       struct perf_event *event = vma->vm_file->private_data;
+       struct perf_event *event = vmf->vma->vm_file->private_data;
        struct ring_buffer *rb;
        int ret = VM_FAULT_SIGBUS;
 
@@ -4950,7 +4954,7 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                goto unlock;
 
        get_page(vmf->page);
-       vmf->page->mapping = vma->vm_file->f_mapping;
+       vmf->page->mapping = vmf->vma->vm_file->f_mapping;
        vmf->page->index   = vmf->pgoff;
 
        ret = 0;
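
Both hunks above track the mm-wide change that dropped the vma argument from the ->fault() callback: the VMA now travels inside struct vm_fault as vmf->vma. The registration site keeps its shape; a sketch of perf's vm_operations_struct as it appears in core.c (handler names assumed unchanged):

	static const struct vm_operations_struct perf_mmap_vmops = {
		.open		= perf_mmap_open,
		.close		= perf_mmap_close,
		.fault		= perf_mmap_fault,
		.page_mkwrite	= perf_mmap_fault,
	};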
@@ -9955,6 +9959,7 @@ SYSCALL_DEFINE5(perf_event_open,
                 * of swizzling perf_event::ctx.
                 */
                perf_remove_from_context(group_leader, 0);
+               put_ctx(gctx);
 
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
@@ -9993,13 +9998,6 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_event__state_init(group_leader);
                perf_install_in_context(ctx, group_leader, group_leader->cpu);
                get_ctx(ctx);
-
-               /*
-                * Now that all events are installed in @ctx, nothing
-                * references @gctx anymore, so drop the last reference we have
-                * on it.
-                */
-               put_ctx(gctx);
        }
 
        /*
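
In the move_group path, the extra reference on the group leader's old context is now dropped as soon as the leader is removed from it, rather than after the whole install loop; the stale comment justifying the late drop goes away with it. The references each event still holds keep gctx alive until the swizzle completes. For reference, a sketch of the refcount pair being balanced, as assumed from core.c of this era:

	static void get_ctx(struct perf_event_context *ctx)
	{
		WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
	}

	static void put_ctx(struct perf_event_context *ctx)
	{
		if (atomic_dec_and_test(&ctx->refcount)) {
			if (ctx->parent_ctx)
				put_ctx(ctx->parent_ctx);
			if (ctx->task && ctx->task != TASK_TOMBSTONE)
				put_task_struct(ctx->task);
			call_rcu(&ctx->rcu_head, free_ctx);
		}
	}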
@@ -10959,5 +10957,11 @@ struct cgroup_subsys perf_event_cgrp_subsys = {
        .css_alloc      = perf_cgroup_css_alloc,
        .css_free       = perf_cgroup_css_free,
        .attach         = perf_cgroup_attach,
+       /*
+        * Implicitly enable on dfl hierarchy so that perf events can
+        * always be filtered by cgroup2 path as long as perf_event
+        * controller is not mounted on a legacy hierarchy.
+        */
+       .implicit_on_dfl = true,
 };
 #endif /* CONFIG_CGROUP_PERF */
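
Setting .implicit_on_dfl tells the cgroup core to enable the perf_event controller on the v2 hierarchy without listing it in cgroup.controllers, so every task has a valid cgroup2 association for perf to filter on unless the controller is claimed by a legacy mount. A sketch of how the core consumes the flag at boot; the wrapper function is hypothetical and the mask name is as assumed from kernel/cgroup.c of this era:

	static u16 cgrp_dfl_implicit_ss_mask;

	/* hypothetical helper: the in-tree code does this inline in cgroup_init() */
	static void __init collect_implicit_ss_mask(void)
	{
		struct cgroup_subsys *ss;
		int ssid;

		for_each_subsys(ss, ssid)
			if (ss->implicit_on_dfl)
				cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
	}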