/* tools/perf/util/ordered-events.c */

#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "evlist.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

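/*
 * Rough lifecycle sketch (names illustrative; the session layer drives
 * this in practice):
 *
 *	ordered_events__init(&session->ordered_events);
 *	...
 *	ordered_events__new(&session->ordered_events, timestamp, event);
 *	ordered_events__flush(session, tool, OE_FLUSH__ROUND);
 *	...
 *	ordered_events__flush(session, tool, OE_FLUSH__FINAL);
 *	ordered_events__free(&session->ordered_events);
 */
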
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

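/*
 * Insert @new into the time-ordered event list. The search starts from
 * oe->last, the most recently queued event, and walks forward or backward
 * depending on how @new's timestamp compares, so nearly-sorted input is
 * queued in close to constant time.
 */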
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
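	/*
	 * Example with illustrative timestamps: if the list holds
	 * 10 20 30 40 and last points at 20, a new event stamped 35
	 * walks forward and lands between 30 and 40, while one stamped
	 * 15 walks backward and lands between 10 and 20.
	 */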
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}
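/*
 * With copy_on_queue set, events are duplicated before queueing so the
 * caller may reuse its buffer right away; cur_alloc_size accounts the
 * duplicated bytes against max_alloc_size.
 */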
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	/* Tolerate NULL: buffer list-head entries carry no dup'ed event. */
	if (event && oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))

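/*
 * Get a free entry, trying the cheapest source first: the cache of
 * previously deleted events, then the current bulk-allocated buffer,
 * and finally a fresh buffer of MAX_SAMPLE_BUFFER entries, as long as
 * max_alloc_size is not exceeded.
 */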
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer_idx = 2;
		oe->buffer->event = NULL;	/* so free_dup_event() skips the head */
		new = oe->buffer + 1;
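		/*
		 * Resulting layout (illustrative): buffer[0] links the
		 * allocation into to_free, buffer[1] is returned now and
		 * buffer[2..MAX_SAMPLE_BUFFER-1] are handed out on later
		 * calls via buffer_idx.
		 */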
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}

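/*
 * Allocate an entry for @event, stamp it with @timestamp and queue it in
 * time order. Returns NULL when the allocation (or the copy_on_queue
 * duplication) fails, which callers can map to -ENOMEM.
 */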
struct ordered_event *
ordered_events__new(struct ordered_events *oe, u64 timestamp,
		    union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
}

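/*
 * Deliver all queued events with a timestamp up to oe->next_flush, in
 * timestamp order, recycling each delivered entry through the cache.
 */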
static int __ordered_events__flush(struct perf_session *s,
				   struct perf_tool *tool)
{
	struct ordered_events *oe = &s->ordered_events;
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!tool->ordered_events || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session__deliver_event(s, iter->event, &sample, tool,
							  iter->file_offset);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}

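/*
 * Flush strategies: FINAL delivers everything (next_flush = ULLONG_MAX);
 * HALF picks the midpoint between the oldest and newest queued timestamps,
 * freeing roughly half the queue when allocation hits its limit; ROUND
 * flushes up to the previously recorded limit and then advances next_flush
 * to the current max_timestamp; NONE leaves next_flush untouched.
 */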
int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
			  enum oe_flush how)
{
	struct ordered_events *oe = &s->ordered_events;
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
	};
	int err;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(s, tool);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

void ordered_events__init(struct ordered_events *oe)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
}
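/*
 * Release the bulk-allocated buffers on the to_free list. Each entry here
 * is a buffer head whose event pointer is NULL (set in alloc_event()), so
 * free_dup_event() is a safe no-op for it; duplicated events themselves
 * are freed as they are flushed, via ordered_events__delete().
 */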
void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}