perf/x86: Only allow rdpmc if a perf_event is mapped
author Andy Lutomirski <luto@amacapital.net>
Fri, 24 Oct 2014 22:58:12 +0000 (15:58 -0700)
committer Ingo Molnar <mingo@kernel.org>
Wed, 4 Feb 2015 11:10:47 +0000 (12:10 +0100)
We currently allow any process to use rdpmc.  This significantly
weakens the protection offered by disabling the TSC via PR_SET_TSC,
and it is useful to attackers mounting timing attacks.

Since CR4.PCE is a single per-CPU bit that enables rdpmc for all
counters at once, we can't grant access to individual counters.
Instead, use a very coarse heuristic to limit access to rdpmc:
allow access only while a perf_event is mmapped.  This protects
seccomp sandboxes.

There is plenty of room to further tighten these restrictions.  For
example, this allows rdpmc for any x86_pmu event, but it's only
useful for self-monitoring tasks.
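
A minimal sketch of the self-monitoring pattern this heuristic
targets (illustration only, not part of the patch; the event choice
is arbitrary and error handling is minimal): the task opens a
counter on itself, mmaps the user page, which is what grants rdpmc
under this patch, and then reads the counter directly:

  #include <linux/perf_event.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          struct perf_event_attr attr;
          struct perf_event_mmap_page *pc;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_INSTRUCTIONS;
          attr.exclude_kernel = 1;

          /* Count on the current task, on any CPU. */
          fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0)
                  return 1;

          /* Mapping the user page is what sets CR4.PCE for this mm. */
          pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED,
                    fd, 0);
          if (pc == MAP_FAILED)
                  return 1;

          /* index == 0 means rdpmc is unavailable for this event. */
          if (pc->cap_user_rdpmc && pc->index)
                  printf("raw pmc: %llu\n", (unsigned long long)
                         __builtin_ia32_rdpmc(pc->index - 1));

          return 0;
  }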

As a side effect, cap_user_rdpmc will now be false for AMD uncore
events.  This isn't a real regression, since .event_idx is disabled
for these events anyway for the time being.  Whenever that gets
re-added, the cap_user_rdpmc code can be adjusted or refactored
accordingly.
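
Userspace must therefore check cap_user_rdpmc on each event's user
page rather than assume a global rdpmc setting.  A sketch of the
retry loop described in the perf_event_mmap_page comments in
include/uapi/linux/perf_event.h, reusing pc from the sketch above
(the helper name is ours; __sync_synchronize() stands in for the
documented barrier()):

  static long long read_pmc_checked(struct perf_event_mmap_page *pc)
  {
          unsigned int seq, idx, width;
          long long count, pmc;

          do {
                  seq = pc->lock;
                  __sync_synchronize();
                  idx = pc->index;
                  count = pc->offset;
                  if (pc->cap_user_rdpmc && idx) {
                          /* Sign-extend the raw value to pmc_width. */
                          width = pc->pmc_width;
                          pmc = __builtin_ia32_rdpmc(idx - 1);
                          pmc <<= 64 - width;
                          pmc >>= 64 - width;
                          count += pmc;
                  }
                  __sync_synchronize();
          } while (pc->lock != seq);

          return count;
  }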

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Vince Weaver <vince@deater.net>
Cc: "hillf.zj" <hillf.zj@alibaba-inc.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/a2bdb3cf3a1d70c26980d7c6dddfbaa69f3182bf.1414190806.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/mmu.h
arch/x86/include/asm/mmu_context.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 876e74e8eec76696b5523210292e71e1c861d5f6..09b9620a73b4841893fc2bd01bfd614d43e7e214 100644
@@ -19,6 +19,8 @@ typedef struct {
 
        struct mutex lock;
        void __user *vdso;
+
+       atomic_t perf_rdpmc_allowed;    /* nonzero if rdpmc is allowed */
 } mm_context_t;
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 52c18359f1dcaf68e093007e17a30a8f71acee37..89c1fece224e7875d8d539c00846823a46df09a9 100644
@@ -18,6 +18,18 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 }
 #endif /* !CONFIG_PARAVIRT */
 
+#ifdef CONFIG_PERF_EVENTS
+static inline void load_mm_cr4(struct mm_struct *mm)
+{
+       if (atomic_read(&mm->context.perf_rdpmc_allowed))
+               cr4_set_bits(X86_CR4_PCE);
+       else
+               cr4_clear_bits(X86_CR4_PCE);
+}
+#else
+static inline void load_mm_cr4(struct mm_struct *mm) {}
+#endif
+
 /*
  * Used for LDT copy/destruction.
  */
@@ -52,6 +64,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                /* Stop flush ipis for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));
 
+               /* Load per-mm CR4 state */
+               load_mm_cr4(next);
+
                /*
                 * Load the LDT, if the LDT is different.
                 *
@@ -87,6 +102,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+                       load_mm_cr4(next);
                        load_LDT_nolock(&next->context);
                }
        }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 73e84a348de10f94170868c01fba69788de9307a..bec5cff7dc808254e52e4d0721d6aec7974d4395 100644
@@ -31,6 +31,7 @@
 #include <asm/nmi.h>
 #include <asm/smp.h>
 #include <asm/alternative.h>
+#include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
 #include <asm/desc.h>
@@ -1328,8 +1329,6 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
                break;
 
        case CPU_STARTING:
-               if (x86_pmu.attr_rdpmc)
-                       cr4_set_bits(X86_CR4_PCE);
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;
@@ -1805,14 +1804,44 @@ static int x86_pmu_event_init(struct perf_event *event)
                        event->destroy(event);
        }
 
+       if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+               event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
+
        return err;
 }
 
+static void refresh_pce(void *ignored)
+{
+       if (current->mm)
+               load_mm_cr4(current->mm);
+}
+
+static void x86_pmu_event_mapped(struct perf_event *event)
+{
+       if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+               return;
+
+       if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
+               on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+}
+
+static void x86_pmu_event_unmapped(struct perf_event *event)
+{
+       if (!current->mm)
+               return;
+
+       if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
+               return;
+
+       if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
+               on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+}
+
 static int x86_pmu_event_idx(struct perf_event *event)
 {
        int idx = event->hw.idx;
 
-       if (!x86_pmu.attr_rdpmc)
+       if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return 0;
 
        if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
@@ -1830,16 +1859,6 @@ static ssize_t get_attr_rdpmc(struct device *cdev,
        return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
 }
 
-static void change_rdpmc(void *info)
-{
-       bool enable = !!(unsigned long)info;
-
-       if (enable)
-               cr4_set_bits(X86_CR4_PCE);
-       else
-               cr4_clear_bits(X86_CR4_PCE);
-}
-
 static ssize_t set_attr_rdpmc(struct device *cdev,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
@@ -1854,11 +1873,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
        if (x86_pmu.attr_rdpmc_broken)
                return -ENOTSUPP;
 
-       if (!!val != !!x86_pmu.attr_rdpmc) {
-               x86_pmu.attr_rdpmc = !!val;
-               on_each_cpu(change_rdpmc, (void *)val, 1);
-       }
-
+       x86_pmu.attr_rdpmc = !!val;
        return count;
 }
 
@@ -1901,6 +1916,9 @@ static struct pmu pmu = {
 
        .event_init             = x86_pmu_event_init,
 
+       .event_mapped           = x86_pmu_event_mapped,
+       .event_unmapped         = x86_pmu_event_unmapped,
+
        .add                    = x86_pmu_add,
        .del                    = x86_pmu_del,
        .start                  = x86_pmu_start,
@@ -1922,7 +1940,8 @@ void arch_perf_update_userpage(struct perf_event *event,
 
        userpg->cap_user_time = 0;
        userpg->cap_user_time_zero = 0;
-       userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
+       userpg->cap_user_rdpmc =
+               !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
        userpg->pmc_width = x86_pmu.cntval_bits;
 
        if (!sched_clock_stable())
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 4e6cdb0ddc70c86472649dc6e1beee3af93ddcae..df525d2be1e814766ac96a82ee3caf33cc4316e4 100644
@@ -71,6 +71,8 @@ struct event_constraint {
 #define PERF_X86_EVENT_COMMITTED       0x8 /* event passed commit_txn */
 #define PERF_X86_EVENT_PEBS_LD_HSW     0x10 /* haswell style datala, load */
 #define PERF_X86_EVENT_PEBS_NA_HSW     0x20 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_RDPMC_ALLOWED   0x40 /* grant rdpmc permission */
+
 
 struct amd_nb {
        int nb_id;  /* NorthBridge id */