perf: Add cgroup support
[linux-2.6-block.git] / tools / perf / util / evsel.c
CommitLineData
f8a95309
ACM
1/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
69aad6f1 10#include "evsel.h"
70082dd9 11#include "evlist.h"
69aad6f1 12#include "util.h"
86bd5e86 13#include "cpumap.h"
fd78260b 14#include "thread_map.h"
69aad6f1 15
c52b12ed
ACM
/* Accessor for the event file descriptor of (cpu x, thread y) in evsel->fd. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
ef1d1af2
ACM
18void perf_evsel__init(struct perf_evsel *evsel,
19 struct perf_event_attr *attr, int idx)
20{
21 evsel->idx = idx;
22 evsel->attr = *attr;
23 INIT_LIST_HEAD(&evsel->node);
24}
25
23a2f3ab 26struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
69aad6f1
ACM
27{
28 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
29
ef1d1af2
ACM
30 if (evsel != NULL)
31 perf_evsel__init(evsel, attr, idx);
69aad6f1
ACM
32
33 return evsel;
34}
35
36int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
37{
38 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
39 return evsel->fd != NULL ? 0 : -ENOMEM;
40}
41
70db7533
ACM
42int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
43{
44 evsel->id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
45 return evsel->id != NULL ? 0 : -ENOMEM;
46}
47
c52b12ed
ACM
48int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
49{
50 evsel->counts = zalloc((sizeof(*evsel->counts) +
51 (ncpus * sizeof(struct perf_counts_values))));
52 return evsel->counts != NULL ? 0 : -ENOMEM;
53}
54
69aad6f1
ACM
55void perf_evsel__free_fd(struct perf_evsel *evsel)
56{
57 xyarray__delete(evsel->fd);
58 evsel->fd = NULL;
59}
60
70db7533
ACM
61void perf_evsel__free_id(struct perf_evsel *evsel)
62{
63 xyarray__delete(evsel->id);
64 evsel->id = NULL;
65}
66
c52b12ed
ACM
67void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
68{
69 int cpu, thread;
70
71 for (cpu = 0; cpu < ncpus; cpu++)
72 for (thread = 0; thread < nthreads; ++thread) {
73 close(FD(evsel, cpu, thread));
74 FD(evsel, cpu, thread) = -1;
75 }
76}
77
ef1d1af2 78void perf_evsel__exit(struct perf_evsel *evsel)
69aad6f1
ACM
79{
80 assert(list_empty(&evsel->node));
81 xyarray__delete(evsel->fd);
70db7533 82 xyarray__delete(evsel->id);
ef1d1af2
ACM
83}
84
/* Destructor: release everything the evsel owns, then the evsel itself. */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}
c52b12ed
ACM
90
91int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
92 int cpu, int thread, bool scale)
93{
94 struct perf_counts_values count;
95 size_t nv = scale ? 3 : 1;
96
97 if (FD(evsel, cpu, thread) < 0)
98 return -EINVAL;
99
4eed11d5
ACM
100 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
101 return -ENOMEM;
102
c52b12ed
ACM
103 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
104 return -errno;
105
106 if (scale) {
107 if (count.run == 0)
108 count.val = 0;
109 else if (count.run < count.ena)
110 count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
111 } else
112 count.ena = count.run = 0;
113
114 evsel->counts->cpu[cpu] = count;
115 return 0;
116}
117
118int __perf_evsel__read(struct perf_evsel *evsel,
119 int ncpus, int nthreads, bool scale)
120{
121 size_t nv = scale ? 3 : 1;
122 int cpu, thread;
123 struct perf_counts_values *aggr = &evsel->counts->aggr, count;
124
52bcd994 125 aggr->val = aggr->ena = aggr->run = 0;
c52b12ed
ACM
126
127 for (cpu = 0; cpu < ncpus; cpu++) {
128 for (thread = 0; thread < nthreads; thread++) {
129 if (FD(evsel, cpu, thread) < 0)
130 continue;
131
132 if (readn(FD(evsel, cpu, thread),
133 &count, nv * sizeof(u64)) < 0)
134 return -errno;
135
136 aggr->val += count.val;
137 if (scale) {
138 aggr->ena += count.ena;
139 aggr->run += count.run;
140 }
141 }
142 }
143
144 evsel->counts->scaled = 0;
145 if (scale) {
146 if (aggr->run == 0) {
147 evsel->counts->scaled = -1;
148 aggr->val = 0;
149 return 0;
150 }
151
152 if (aggr->run < aggr->ena) {
153 evsel->counts->scaled = 1;
154 aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
155 }
156 } else
157 aggr->ena = aggr->run = 0;
158
159 return 0;
160}
48290609 161
/*
 * Open the event on every (cpu, thread) pair, lazily allocating the fd
 * table on first use. When 'group' is set, the first fd opened on each cpu
 * becomes the group leader for the remaining threads on that cpu.
 *
 * inherit is only honoured for the "any cpu" entry (map value < 0); on
 * per-cpu opens it is forced off.
 *
 * On any open failure, every fd opened so far is closed and -1 is
 * returned; fd slots are reset to -1.
 */
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group, bool inherit)
{
	int cpu, thread;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;	/* leader fd for this cpu's group */

		evsel->attr.inherit = (cpus->map[cpu] < 0) && inherit;

		for (thread = 0; thread < threads->nr; thread++) {
			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     threads->map[thread],
								     cpus->map[cpu],
								     group_fd, 0);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	/*
	 * Unwind in reverse: first the threads already opened on the failing
	 * cpu (thread is the failing index, so pre-decrement skips it), then
	 * all threads of every earlier cpu.
	 */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}
201
0252208e
ACM
/*
 * Fallback maps used when the caller passes NULL for cpus/threads: a
 * single entry holding -1, the value handed straight to
 * sys_perf_event_open() to mean "no specific cpu/thread". The anonymous
 * wrapper structs place storage for that one element directly behind the
 * map header.
 */
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus = { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr = 1,
	.threads = { -1, },
};
217
f08199d3 218int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
9d04f178 219 struct thread_map *threads, bool group, bool inherit)
48290609 220{
0252208e
ACM
221 if (cpus == NULL) {
222 /* Work around old compiler warnings about strict aliasing */
223 cpus = &empty_cpu_map.map;
48290609
ACM
224 }
225
0252208e
ACM
226 if (threads == NULL)
227 threads = &empty_thread_map.map;
48290609 228
9d04f178 229 return __perf_evsel__open(evsel, cpus, threads, group, inherit);
48290609
ACM
230}
231
/* Open the event on each given cpu, with the "any thread" (-1) map. */
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
}
48290609 237
/* Open the event on each given thread, with the "any cpu" (-1) map. */
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group, bool inherit)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
}
70082dd9 243
8115d60c
ACM
/*
 * Parse the sample_id_all trailer of a non-SAMPLE event. The id fields are
 * appended at the END of the record, so we position 'array' on the last u64
 * and walk BACKWARD, consuming fields in the reverse of the order they were
 * written (CPU, STREAM_ID, ID, TIME, TID) — the order of these if-blocks is
 * therefore significant and must not be rearranged.
 */
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	/* Point at the last u64 of the record. */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		/* cpu is a u32 in the low half of the u64 slot. */
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		/* pid and tid are packed as two u32s in one u64 slot. */
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}
281
8115d60c
ACM
282int perf_event__parse_sample(const union perf_event *event, u64 type,
283 bool sample_id_all, struct perf_sample *data)
d0dd74e8
ACM
284{
285 const u64 *array;
286
287 data->cpu = data->pid = data->tid = -1;
288 data->stream_id = data->id = data->time = -1ULL;
289
290 if (event->header.type != PERF_RECORD_SAMPLE) {
291 if (!sample_id_all)
292 return 0;
8115d60c 293 return perf_event__parse_id_sample(event, type, data);
d0dd74e8
ACM
294 }
295
296 array = event->sample.array;
297
298 if (type & PERF_SAMPLE_IP) {
299 data->ip = event->ip.ip;
300 array++;
301 }
302
303 if (type & PERF_SAMPLE_TID) {
304 u32 *p = (u32 *)array;
305 data->pid = p[0];
306 data->tid = p[1];
307 array++;
308 }
309
310 if (type & PERF_SAMPLE_TIME) {
311 data->time = *array;
312 array++;
313 }
314
315 if (type & PERF_SAMPLE_ADDR) {
316 data->addr = *array;
317 array++;
318 }
319
320 data->id = -1ULL;
321 if (type & PERF_SAMPLE_ID) {
322 data->id = *array;
323 array++;
324 }
325
326 if (type & PERF_SAMPLE_STREAM_ID) {
327 data->stream_id = *array;
328 array++;
329 }
330
331 if (type & PERF_SAMPLE_CPU) {
332 u32 *p = (u32 *)array;
333 data->cpu = *p;
334 array++;
335 }
336
337 if (type & PERF_SAMPLE_PERIOD) {
338 data->period = *array;
339 array++;
340 }
341
342 if (type & PERF_SAMPLE_READ) {
343 fprintf(stderr, "PERF_SAMPLE_READ is unsuported for now\n");
344 return -1;
345 }
346
347 if (type & PERF_SAMPLE_CALLCHAIN) {
348 data->callchain = (struct ip_callchain *)array;
349 array += 1 + data->callchain->nr;
350 }
351
352 if (type & PERF_SAMPLE_RAW) {
353 u32 *p = (u32 *)array;
354 data->raw_size = *p;
355 p++;
356 data->raw_data = p;
357 }
358
359 return 0;
360}