Merge remote-tracking branch 'tip/perf/core' into perf/urgent
authorArnaldo Carvalho de Melo <acme@redhat.com>
Mon, 8 Jul 2019 16:06:57 +0000 (13:06 -0300)
committerArnaldo Carvalho de Melo <acme@redhat.com>
Mon, 8 Jul 2019 16:06:57 +0000 (13:06 -0300)
To pick up fixes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/machine.c
tools/perf/util/thread.c

index 7b7ac0f6cec9e87c93abbaa2ddffea565c3302b0..d819a3e8b552b47bd3019eeef6c39ccc4500a6c9 100644 (file)
@@ -260,6 +260,13 @@ struct kvm_vcpu_events {
         KVM_REG_SIZE_U256 |                                            \
         ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
 
+/*
+ * Register values for KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() and
+ * KVM_REG_ARM64_SVE_FFR() are represented in memory in an endianness-
+ * invariant layout which differs from the layout used for the FPSIMD
+ * V-registers on big-endian systems: see sigcontext.h for more explanation.
+ */
+
 #define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
 #define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
 
index 24a8cd229df6897975528d6097bca80d94204eee..d6ab5b4d15e543800a7a7524517b495fa6305074 100644 (file)
@@ -383,6 +383,9 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE  (1 << 2)
 #define KVM_X86_QUIRK_OUT_7E_INC_RIP   (1 << 3)
 
+#define KVM_STATE_NESTED_FORMAT_VMX    0
+#define KVM_STATE_NESTED_FORMAT_SVM    1       /* unused */
+
 #define KVM_STATE_NESTED_GUEST_MODE    0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING   0x00000002
 #define KVM_STATE_NESTED_EVMCS         0x00000004
@@ -390,7 +393,14 @@ struct kvm_sync_regs {
 #define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON     0x00000002
 
-struct kvm_vmx_nested_state {
+#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
+
+struct kvm_vmx_nested_state_data {
+       __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+       __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+};
+
+struct kvm_vmx_nested_state_hdr {
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
 
@@ -401,24 +411,25 @@ struct kvm_vmx_nested_state {
 
 /* for KVM_CAP_NESTED_STATE */
 struct kvm_nested_state {
-       /* KVM_STATE_* flags */
        __u16 flags;
-
-       /* 0 for VMX, 1 for SVM.  */
        __u16 format;
-
-       /* 128 for SVM, 128 + VMCS size for VMX.  */
        __u32 size;
 
        union {
-               /* VMXON, VMCS */
-               struct kvm_vmx_nested_state vmx;
+               struct kvm_vmx_nested_state_hdr vmx;
 
                /* Pad the header to 128 bytes.  */
                __u8 pad[120];
-       };
+       } hdr;
 
-       __u8 data[0];
+       /*
+        * Define data region as 0 bytes to preserve backwards-compatibility
+        * to old definition of kvm_nested_state in order to avoid changing
+        * KVM_{GET,PUT}_NESTED_STATE ioctl values.
+        */
+       union {
+               struct kvm_vmx_nested_state_data vmx[0];
+       } data;
 };
 
 #endif /* _ASM_X86_KVM_H */
index 9b7632ff70aa370efa5d9ce9ba17bd95d50c4d72..f12a4e2179681bcaceaa8aaaf3c1332d4e1f7266 100755 (executable)
@@ -45,7 +45,7 @@ trace_libc_inet_pton_backtrace() {
                eventattr='max-stack=4'
                echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
                echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
-               echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+               echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
                ;;
        *)
                eventattr='max-stack=3'
index 5ab31a4a658dd4ce696baada7027dbae7679abd5..7fb4ae82f34cef4ea0ca7dd5dcda188445fc2849 100644 (file)
@@ -1800,14 +1800,8 @@ static int perf_event_open(struct perf_evsel *evsel,
                if (fd >= 0)
                        break;
 
-               /*
-                * Do quick precise_ip fallback if:
-                *  - there is precise_ip set in perf_event_attr
-                *  - maximum precise is requested
-                *  - sys_perf_event_open failed with ENOTSUP error,
-                *    which is associated with wrong precise_ip
-                */
-               if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP))
+               /* Do not try less precise if not requested. */
+               if (!evsel->precise_max)
                        break;
 
                /*
index bf26dc85eaaa3866c792fe4ef52e9d634d91d633..6a93ff5d8db5cd062c9dbfb89e2f7afaf8d43789 100644 (file)
@@ -3683,6 +3683,7 @@ int perf_event__synthesize_features(struct perf_tool *tool,
                return -ENOMEM;
 
        ff.size = sz - sz_hdr;
+       ff.ph = &session->header;
 
        for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
                if (!feat_ops[feat].synthesize) {
index 1b3d7265bca96b1cfdb7fd6c55ef1144b683663a..147ed85ea2bc2619f55eab46d5905dd898ede9be 100644 (file)
@@ -210,6 +210,18 @@ void machine__exit(struct machine *machine)
 
        for (i = 0; i < THREADS__TABLE_SIZE; i++) {
                struct threads *threads = &machine->threads[i];
+               struct thread *thread, *n;
+               /*
+                * Forget about the dead, at this point whatever threads were
+                * left in the dead lists better have a reference count taken
+                * by who is using them, and then, when they drop those references
+                * and it finally hits zero, thread__put() will check and see that
+                * it's not in the dead threads list and will not try to remove it
+                * from there, just calling thread__delete() straight away.
+                */
+               list_for_each_entry_safe(thread, n, &threads->dead, node)
+                       list_del_init(&thread->node);
+
                exit_rwsem(&threads->lock);
        }
 }
@@ -1759,9 +1771,11 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
        if (threads->last_match == th)
                threads__set_last_match(threads, NULL);
 
-       BUG_ON(refcount_read(&th->refcnt) == 0);
        if (lock)
                down_write(&threads->lock);
+
+       BUG_ON(refcount_read(&th->refcnt) == 0);
+
        rb_erase_cached(&th->rb_node, &threads->entries);
        RB_CLEAR_NODE(&th->rb_node);
        --threads->nr;
@@ -1771,9 +1785,16 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
         * will be called and we will remove it from the dead_threads list.
         */
        list_add_tail(&th->node, &threads->dead);
+
+       /*
+        * We need to do the put here because if this is the last refcount,
+        * then we will be touching the threads->dead head when removing the
+        * thread.
+        */
+       thread__put(th);
+
        if (lock)
                up_write(&threads->lock);
-       thread__put(th);
 }
 
 void machine__remove_thread(struct machine *machine, struct thread *th)
index aab7807d445ffbcc14f275f34a8eaf1918208224..3e29a4e8b5e6645097c805d3dee4e343cb83aca2 100644 (file)
@@ -125,10 +125,27 @@ void thread__put(struct thread *thread)
 {
        if (thread && refcount_dec_and_test(&thread->refcnt)) {
                /*
-                * Remove it from the dead_threads list, as last reference
-                * is gone.
+                * Remove it from the dead threads list, as last reference is
+                * gone, if it is in a dead threads list.
+                *
+                * We may not be there anymore if say, the machine where it was
+                * stored was already deleted, so we already removed it from
+                * the dead threads and some other piece of code still keeps a
+                * reference.
+                *
+                * This is what 'perf sched' does and finally drops it in
+                * perf_sched__lat(), where it calls perf_sched__read_events(),
+                * that processes the events by creating a session and deleting
+                * it, which ends up destroying the list heads for the dead
+                * threads, but before it does that it removes all threads from
+                * it using list_del_init().
+                *
+                * So we need to check here if it is in a dead threads list and
+                * if so, remove it before finally deleting the thread, to avoid
+                * a use-after-free situation.
                 */
-               list_del_init(&thread->node);
+               if (!list_empty(&thread->node))
+                       list_del_init(&thread->node);
                thread__delete(thread);
        }
 }