perf tools: Move event__parse_sample to evsel.c
[linux-block.git] / tools/perf/util/evsel.h
#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include "../../../include/linux/perf_event.h"
#include "types.h"
#include "xyarray.h"

struct perf_counts_values {
        union {
                struct {
                        u64 val;
                        u64 ena;
                        u64 run;
                };
                u64 values[3];
        };
};

struct perf_counts {
        s8                        scaled;
        struct perf_counts_values aggr;
        struct perf_counts_values cpu[];
};

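/*
 * Illustration only (not part of the original header), assuming the event
 * was opened with PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING in attr.read_format:
 * val/ena/run mirror the value, time_enabled and time_running fields of the
 * read format, so a multiplexed counter can be extrapolated as
 * val * ena / run, which is what the 'scale' read variants further down are
 * for.  The helper name below is hypothetical.
 */
static inline u64 perf_counts_values__example_scale(const struct perf_counts_values *v)
{
        if (v->run == 0)        /* event never ran: nothing to extrapolate */
                return 0;
        return (u64)((double)v->val * v->ena / v->run);
}
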
struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
        struct hlist_node node;
        u64               id;
        struct perf_evsel *evsel;
};

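/*
 * Sketch only (hypothetical helper, not part of this header): given the hash
 * bucket holding struct perf_sample_id entries for a PERF_SAMPLE_ID value,
 * the owning evsel can be recovered by walking the bucket.  Assumes
 * container_of() is available from the list helpers.
 */
static inline struct perf_evsel *example__id2evsel(struct hlist_head *head, u64 id)
{
        struct hlist_node *pos;

        for (pos = head->first; pos != NULL; pos = pos->next) {
                struct perf_sample_id *sid;

                sid = container_of(pos, struct perf_sample_id, node);
                if (sid->id == id)
                        return sid->evsel;
        }
        return NULL;
}
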
struct perf_evsel {
        struct list_head       node;    /* entry in the owning evlist */
        struct perf_event_attr attr;
        char                   *filter; /* event filter string, if any */
        struct xyarray         *fd;     /* per cpu/thread file descriptors */
        struct xyarray         *id;     /* per cpu/thread sample ids */
        struct perf_counts     *counts; /* per cpu and aggregated values */
        int                    idx;     /* position in the evlist */
        void                   *priv;   /* tool-private data */
};

struct cpu_map;
struct thread_map;
struct perf_evlist;

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__delete(struct perf_evsel *evsel);

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus, bool group, bool inherit);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads, bool group, bool inherit);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads, bool group, bool inherit);
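
/*
 * Usage sketch (illustration only, not part of the original header): a
 * typical single-counter lifecycle.  'cpus'/'threads' and their sizes are
 * assumed to have been set up by the caller; the function name is
 * hypothetical.
 */
static inline int example__open_close(struct perf_event_attr *attr,
                                      struct cpu_map *cpus, int ncpus,
                                      struct thread_map *threads, int nthreads)
{
        struct perf_evsel *evsel = perf_evsel__new(attr, 0);
        int err = -1;

        if (evsel == NULL)
                return -1;

        if (perf_evsel__alloc_fd(evsel, ncpus, nthreads) < 0 ||
            perf_evsel__alloc_counts(evsel, ncpus) < 0)
                goto out_delete;

        if (perf_evsel__open(evsel, cpus, threads, false, false) < 0)
                goto out_delete;

        /* ... read the counter, mmap, etc. here ... */

        err = 0;
        perf_evsel__close_fd(evsel, ncpus, nthreads);
out_delete:
        perf_evsel__delete(evsel);
        return err;
}
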
int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
                      struct thread_map *threads, int pages, bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus);

#define perf_evsel__match(evsel, t, c)                  \
        (evsel->attr.type == PERF_TYPE_##t &&           \
         evsel->attr.config == PERF_COUNT_##c)

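/*
 * Example (illustration only): perf_evsel__match(counter, HARDWARE,
 * HW_CPU_CYCLES) expands to a check of attr.type against PERF_TYPE_HARDWARE
 * and attr.config against PERF_COUNT_HW_CPU_CYCLES.
 */
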
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                                          int cpu, int thread)
{
        return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
                                                 int cpu, int thread)
{
        return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}

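/*
 * Illustration only: after a successful per-cpu read, the value is expected
 * in evsel->counts->cpu[cpu], e.g.:
 *
 *      if (perf_evsel__read_on_cpu(evsel, cpu, thread) == 0)
 *              use(evsel->counts->cpu[cpu].val);   (use() is hypothetical)
 */
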
int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
                       bool scale);

/**
 * perf_evsel__read - Read the aggregate results on all CPUs
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read(struct perf_evsel *evsel,
                                   int ncpus, int nthreads)
{
        return __perf_evsel__read(evsel, ncpus, nthreads, false);
}

/**
 * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
                                          int ncpus, int nthreads)
{
        return __perf_evsel__read(evsel, ncpus, nthreads, true);
}

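/*
 * Illustration only: a successful aggregate read is expected to land in
 * evsel->counts->aggr, with counts->scaled indicating whether the value was
 * extrapolated from ena/run, e.g.:
 *
 *      if (perf_evsel__read_scaled(evsel, ncpus, nthreads) == 0)
 *              use(evsel->counts->aggr.val);   (use() is hypothetical)
 */
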
#endif /* __PERF_EVSEL_H */