oprofile: remove #ifdef CONFIG_OPROFILE_IBS in non-ibs code
drivers/oprofile/cpu_buffer.c

/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

/*
 * Read and write access uses spin locking. Thus, writing to the
 * buffer by the NMI handler (x86) can also occur during critical
 * sections when reading the buffer. To avoid this, there are 2
 * buffers for independent read and write access. Read access is in
 * process context only, write access only in the NMI handler. If the
 * read buffer runs empty, both buffers are swapped atomically. There
 * is potentially a small window during swapping where the buffers are
 * disabled and samples could be lost.
 *
 * Using 2 buffers is a little bit of overhead, but the solution is
 * clear and does not require changes in the ring buffer
 * implementation. It can be changed to a single buffer solution when
 * the ring buffer access is implemented as non-locking atomic code.
 */
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;

	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct entry can be uninitialized. The function reserves a
 * data array that is specified by size. Use
 * op_cpu_buffer_write_commit() after preparing the sample. On error
 * a null pointer is returned, otherwise a pointer to the sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer_write, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]), &entry->irq_flags);
	if (entry->event)
		entry->sample = ring_buffer_event_data(entry->event);
	else
		entry->sample = NULL;

	if (!entry->sample)
		return NULL;

	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
					 entry->irq_flags);
}

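/*
 * Example: a minimal, illustrative sketch of the reserve/commit
 * protocol described above -- reserve a sample with two extra data
 * words, fill it in, then commit it.  It mirrors what op_add_code()
 * and oprofile_add_data() below do; op_cpu_buffer_add_data() comes
 * from "cpu_buffer.h".  The helper name and its parameters are
 * hypothetical and not part of this driver.
 */
static __maybe_unused int
example_write_escape_sample(unsigned long code, unsigned long value)
{
	struct op_entry entry;
	struct op_sample *sample;

	/* reserve room for the sample header plus 2 data words */
	sample = op_cpu_buffer_write_reserve(&entry, 2);
	if (!sample)
		return -ENOMEM;

	/* escape-code samples carry their payload in the data array */
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */
	op_cpu_buffer_add_data(&entry, code);
	op_cpu_buffer_add_data(&entry, value);

	/* make the sample visible to the read side */
	return op_cpu_buffer_write_commit(&entry);
}
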
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	return NULL;

event:
	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}

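/*
 * Example: an illustrative sketch (not taken verbatim from the sync
 * code) of how a reader in process context drains one CPU's buffer
 * with the two functions above; sync_buffer() in buffer_sync.c uses
 * them in a similar way.  The helper name is hypothetical.
 */
static __maybe_unused void example_drain_cpu_buffer(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long available = op_cpu_buffer_entries(cpu);

	while (available-- > 0) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;
		/*
		 * sample->eip and sample->event are valid here, as are
		 * entry.size data words in entry.data[]; a real
		 * consumer would decode escape codes and forward the
		 * rest to the event buffer.
		 */
	}
}
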
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event)
{
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

/*
 * Add samples with data to the ring buffer.
 *
 * Use op_cpu_buffer_add_data(&entry, val) to add data and
 * op_cpu_buffer_write_commit(&entry) to commit the sample.
 */
void oprofile_add_data(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	cpu_buf->sample_lost_overflow++;
}

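/*
 * Example: an illustrative sketch (hypothetical caller, not part of
 * this driver) of the protocol described above -- oprofile_add_data()
 * starts an escape sample and reserves room for "size" extra words;
 * the caller then adds its payload with op_cpu_buffer_add_data() and
 * finishes with op_cpu_buffer_write_commit().  entry.event is cleared
 * up front so this sketch can tell whether the reservation succeeded.
 */
static __maybe_unused void
example_log_sample_with_payload(struct pt_regs * const regs, unsigned long pc,
				int code, unsigned long *payload, int size)
{
	struct op_entry entry;
	int i;

	entry.event = NULL;
	oprofile_add_data(&entry, regs, pc, code, size);
	if (!entry.event)
		/* reservation failed, already counted as lost */
		return;

	/* payload words fill the data array reserved above */
	for (i = 0; i < size; i++)
		op_cpu_buffer_add_data(&entry, payload[i]);

	op_cpu_buffer_write_commit(&entry);
}
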
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}