perf tools: Add report.queue-size config file option
tools/perf/util/ordered-events.c
#include <linux/list.h>
#include "ordered-events.h"
#include "evlist.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

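/*
 * Queue a new event into the time-sorted list. The search starts from
 * the most recently queued event (oe->last) rather than from the list
 * head, on the assumption that successive events carry nearby
 * timestamps, and walks forward or backward as needed.
 */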
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

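/*
 * Events are carved out of buffers sized to hold roughly 64KB worth of
 * struct ordered_event entries. Freed events are recycled through
 * oe->cache first; a fresh buffer is malloc'ed only while the total
 * allocation stays below oe->max_alloc_size.
 */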
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer)
			return NULL;

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	}

	return new;
}

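/*
 * Allocate an ordered_event for @timestamp and queue it. Returns NULL
 * when the cache is empty and the allocation limit has been reached;
 * the caller is then expected to flush (OE_FLUSH__HALF) and retry.
 */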
struct ordered_event *
ordered_events__new(struct ordered_events *oe, u64 timestamp)
{
	struct ordered_event *new;

	new = alloc_event(oe);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

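/* Return a delivered event to the allocation cache. */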
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
}

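/*
 * Deliver every queued event with a timestamp up to oe->next_flush,
 * recycling each one back into the cache. A final flush (limit of
 * ULLONG_MAX) reports progress through the UI.
 */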
static int __ordered_events__flush(struct perf_session *s,
				   struct perf_tool *tool)
{
	struct ordered_events *oe = &s->ordered_events;
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	struct perf_sample sample;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!tool->ordered_events || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session__deliver_event(s, iter->event, &sample, tool,
							  iter->file_offset);
			if (ret)
				return ret;
		}

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}

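/*
 * Compute the flush limit according to @how:
 *   OE_FLUSH__FINAL - flush everything (next_flush = ULLONG_MAX)
 *   OE_FLUSH__HALF  - flush the older half of the queued time range,
 *                     used when alloc_event() hit the size limit
 *   OE_FLUSH__ROUND - flush up to the previously recorded limit and,
 *                     on success, advance it to the highest queued
 *                     timestamp
 */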
int ordered_events__flush(struct perf_session *s, struct perf_tool *tool,
			  enum oe_flush how)
{
	struct ordered_events *oe = &s->ordered_events;
	int err;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	default:
		break;
	}

	err = __ordered_events__flush(s, tool);

	if (!err && how == OE_FLUSH__ROUND)
		oe->next_flush = oe->max_timestamp;

	return err;
}

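/*
 * Initialize the queue, cache and to_free lists. The allocation limit
 * defaults to unlimited; the report.queue-size config option added by
 * this commit lets users lower max_alloc_size.
 */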
void ordered_events__init(struct ordered_events *oe)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
}

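/*
 * Free all allocated buffers. Only the first entry of each buffer sits
 * on the to_free list (see alloc_event()), so freeing it releases the
 * whole chunk.
 */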
void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free(event);
	}
}
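
/*
 * Rough usage sketch, based on how perf_session drives this code; the
 * caller-side details are illustrative, not verbatim:
 *
 *	ordered_events__init(oe);
 *
 *	new = ordered_events__new(oe, sample->time);
 *	if (!new) {	/* size limit hit: free the older half, retry */
 *		ordered_events__flush(s, tool, OE_FLUSH__HALF);
 *		new = ordered_events__new(oe, sample->time);
 *	}
 *	new->event	 = event;
 *	new->file_offset = file_offset;
 *
 *	ordered_events__flush(s, tool, OE_FLUSH__ROUND);  (on each round)
 *	ordered_events__flush(s, tool, OE_FLUSH__FINAL);  (at end of input)
 *	ordered_events__free(oe);
 */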