/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS 0

/*
 * Read and write access to the buffer is implemented with spin
 * locking. Thus, writing to the buffer by the NMI handler (x86) can
 * also occur during critical sections when reading the buffer. To
 * avoid this, there are 2 buffers for independent read and write
 * access. Read access is in process context only, write access only
 * in the NMI handler. If the read buffer runs empty, both buffers are
 * swapped atomically. There is potentially a small window during
 * swapping where the buffers are disabled and samples could be lost.
 *
 * Using 2 buffers adds a little overhead, but the solution is clear
 * and does not require changes to the ring buffer implementation. It
 * can be changed to a single buffer solution when the ring buffer
 * access is implemented as non-locking atomic code.
 */
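/*
 * A minimal sketch of the read-side swap described above, assuming
 * the ring buffer API used below (see op_cpu_buffer_read_entry() for
 * the full version; ring_buffer_swap_cpu() returns 0 on success):
 *
 *      e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
 *      if (!e && !ring_buffer_swap_cpu(op_ring_buffer_read,
 *                                      op_ring_buffer_write, cpu))
 *              e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
 */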
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
        return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
        struct oprofile_cpu_buffer *cpu_buf
                = &__get_cpu_var(cpu_buffer);

        cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
        if (op_ring_buffer_read)
                ring_buffer_free(op_ring_buffer_read);
        op_ring_buffer_read = NULL;
        if (op_ring_buffer_write)
                ring_buffer_free(op_ring_buffer_write);
        op_ring_buffer_write = NULL;
}

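/*
 * The ring buffer prepends a small header to each event; account for
 * it when converting the buffer size in samples to a size in bytes.
 */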
#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
        int i;

        unsigned long buffer_size = oprofile_cpu_buffer_size;
        unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
                                                 RB_EVENT_HDR_SIZE);

        op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
        if (!op_ring_buffer_read)
                goto fail;
        op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
        if (!op_ring_buffer_write)
                goto fail;

        for_each_possible_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                b->last_task = NULL;
                b->last_is_kernel = -1;
                b->tracing = 0;
                b->buffer_size = buffer_size;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->backtrace_aborted = 0;
                b->sample_invalid_eip = 0;
                b->cpu = i;
                INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}

void start_cpu_work(void)
{
        int i;

        work_enabled = 1;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                /*
                 * Spread the work by 1 jiffy per cpu so they don't all
                 * fire at once.
                 */
                schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
        }
}

void end_cpu_work(void)
{
        int i;

        work_enabled = 0;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

                cancel_delayed_work(&b->work);
        }

        flush_scheduled_work();
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct entry can be uninitialized. The function reserves a
 * data array of the given size in the sample. Use
 * op_cpu_buffer_write_commit() after preparing the sample. On error a
 * NULL pointer is returned, otherwise the pointer to the sample.
 */
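/*
 * A minimal usage sketch of the reserve/commit cycle (this mirrors
 * op_add_sample() below; pc and event stand for caller-supplied
 * values):
 *
 *      struct op_entry entry;
 *      struct op_sample *sample;
 *
 *      sample = op_cpu_buffer_write_reserve(&entry, 0);
 *      if (!sample)
 *              return -ENOMEM;
 *      sample->eip = pc;
 *      sample->event = event;
 *      return op_cpu_buffer_write_commit(&entry);
 */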
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
        entry->event = ring_buffer_lock_reserve
                (op_ring_buffer_write, sizeof(struct op_sample) +
                 size * sizeof(entry->sample->data[0]));
        if (entry->event)
                entry->sample = ring_buffer_event_data(entry->event);
        else
                entry->sample = NULL;

        if (!entry->sample)
                return NULL;

        entry->size = size;
        entry->data = entry->sample->data;

        return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
        return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
}

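/*
 * Fetch one sample for the given cpu: consume from the read buffer
 * first; if it is empty, swap in the write buffer and retry. NULL is
 * returned only when both buffers are empty.
 */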
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
        struct ring_buffer_event *e;
        e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
        if (e)
                goto event;
        if (ring_buffer_swap_cpu(op_ring_buffer_read,
                                 op_ring_buffer_write,
                                 cpu))
                return NULL;
        e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
        if (e)
                goto event;
        return NULL;

event:
        entry->event = e;
        entry->sample = ring_buffer_event_data(e);
        entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
                / sizeof(entry->sample->data[0]);
        entry->data = entry->sample->data;
        return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
        return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
                + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}

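/*
 * op_add_code() writes an escape sample that records context changes:
 * the begin of a trace, a user/kernel mode switch and/or a task
 * switch. Nothing is written if no state changed.
 */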
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
            int is_kernel, struct task_struct *task)
{
        struct op_entry entry;
        struct op_sample *sample;
        unsigned long flags;
        int size;

        flags = 0;

        if (backtrace)
                flags |= TRACE_BEGIN;

        /* notice a switch from user->kernel or vice versa */
        is_kernel = !!is_kernel;
        if (cpu_buf->last_is_kernel != is_kernel) {
                cpu_buf->last_is_kernel = is_kernel;
                flags |= KERNEL_CTX_SWITCH;
                if (is_kernel)
                        flags |= IS_KERNEL;
        }

        /* notice a task switch */
        if (cpu_buf->last_task != task) {
                cpu_buf->last_task = task;
                flags |= USER_CTX_SWITCH;
        }

        if (!flags)
                /* nothing to do */
                return 0;

        if (flags & USER_CTX_SWITCH)
                size = 1;
        else
                size = 0;

        sample = op_cpu_buffer_write_reserve(&entry, size);
        if (!sample)
                return -ENOMEM;

        sample->eip = ESCAPE_CODE;
        sample->event = flags;

        if (size)
                op_cpu_buffer_add_data(&entry, (unsigned long)task);

        op_cpu_buffer_write_commit(&entry);

        return 0;
}

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
              unsigned long pc, unsigned long event)
{
        struct op_entry entry;
        struct op_sample *sample;

        sample = op_cpu_buffer_write_reserve(&entry, 0);
        if (!sample)
                return -ENOMEM;

        sample->eip = pc;
        sample->event = event;

        return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
           unsigned long backtrace, int is_kernel, unsigned long event)
{
        cpu_buf->sample_received++;

        if (pc == ESCAPE_CODE) {
                cpu_buf->sample_invalid_eip++;
                return 0;
        }

        if (op_add_code(cpu_buf, backtrace, is_kernel, current))
                goto fail;

        if (op_add_sample(cpu_buf, pc, event))
                goto fail;

        return 1;

fail:
        cpu_buf->sample_lost_overflow++;
        return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                          unsigned long event, int is_kernel)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
        unsigned long backtrace = oprofile_backtrace_depth;

        /*
         * if log_sample() fails we can't backtrace since we lost the
         * source of this event
         */
        if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
                /* failed */
                return;

        if (!backtrace)
                return;

        oprofile_begin_trace(cpu_buf);
        oprofile_ops.backtrace(regs, backtrace);
        oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                             unsigned long event, int is_kernel)
{
        __oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
        int is_kernel = !user_mode(regs);
        unsigned long pc = profile_pc(regs);

        __oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_write_reserve(&entry, ...) to reserve the sample,
 * oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
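/*
 * A minimal sketch of the full sequence (code, size and data[] are
 * illustrative, caller-chosen values):
 *
 *      struct op_entry entry;
 *      int i;
 *
 *      oprofile_write_reserve(&entry, regs, pc, code, size);
 *      for (i = 0; i < size; i++)
 *              oprofile_add_data(&entry, data[i]);
 *      oprofile_write_commit(&entry);
 */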
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
                       unsigned long pc, int code, int size)
{
        struct op_sample *sample;
        int is_kernel = !user_mode(regs);
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        cpu_buf->sample_received++;

        /* no backtraces for samples with data */
        if (op_add_code(cpu_buf, 0, is_kernel, current))
                goto fail;

        /* reserve room for the code and pc words plus the data array */
        sample = op_cpu_buffer_write_reserve(entry, size + 2);
        if (!sample)
                goto fail;
        sample->eip = ESCAPE_CODE;
        sample->event = 0;              /* no flags */

        op_cpu_buffer_add_data(entry, code);
        op_cpu_buffer_add_data(entry, pc);

        return;

fail:
        entry->event = NULL;
        cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
        if (!entry->event)
                return 0;
        return op_cpu_buffer_add_data(entry, val);
}

int oprofile_write_commit(struct op_entry *entry)
{
        if (!entry->event)
                return -EINVAL;
        return op_cpu_buffer_write_commit(entry);
}

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
        log_sample(cpu_buf, pc, 0, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

        if (!cpu_buf->tracing)
                return;

        /*
         * a broken frame can give an eip with the same value as an
         * escape code, abort the trace if we get it
         */
        if (pc == ESCAPE_CODE)
                goto fail;

        if (op_add_sample(cpu_buf, pc, 0))
                goto fail;

        return;
fail:
        cpu_buf->tracing = 0;
        cpu_buf->backtrace_aborted++;
        return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on() and then schedule_delayed_work()
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
        struct oprofile_cpu_buffer *b =
                container_of(work, struct oprofile_cpu_buffer, work.work);
        if (b->cpu != smp_processor_id()) {
                printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
                       smp_processor_id(), b->cpu);

                if (!cpu_online(b->cpu)) {
                        cancel_delayed_work(&b->work);
                        return;
                }
        }
        sync_buffer(b->cpu);

        /* don't re-add the work if we're shutting down */
        if (work_enabled)
                schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}