/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */
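
/*
 * Data flow at a glance (a restatement of the description above):
 *
 *   interrupt/NMI handler --> per-CPU ring buffer --+
 *                                                   +--> sync_buffer()
 *   interrupt/NMI handler --> per-CPU ring buffer --+    --> global event
 *                                                        buffer
 */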

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include <asm/ptrace.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS 0

static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

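/*
 * Each per-CPU buffer is drained into the event buffer by a delayed
 * work item that re-arms itself every DEFAULT_TIMER_EXPIRE jiffies,
 * i.e. roughly every 100 ms (HZ jiffies == 1 second).
 */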
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
        return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
        struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

        cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
        if (op_ring_buffer)
                ring_buffer_free(op_ring_buffer);
        op_ring_buffer = NULL;
}

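/*
 * Samples stored in the tracing ring buffer carry a small per-event
 * header. RB_EVENT_HDR_SIZE below accounts for that overhead when
 * alloc_cpu_buffers() converts the requested number of samples into a
 * byte size for ring_buffer_alloc().
 */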
#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
        int i;

        unsigned long buffer_size = oprofile_cpu_buffer_size;
        unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
                                                 RB_EVENT_HDR_SIZE);

        op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
        if (!op_ring_buffer)
                goto fail;

        for_each_possible_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

                b->last_task = NULL;
                b->last_is_kernel = -1;
                b->tracing = 0;
                b->buffer_size = buffer_size;
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->backtrace_aborted = 0;
                b->sample_invalid_eip = 0;
                b->cpu = i;
                INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
        }
        return 0;

fail:
        free_cpu_buffers();
        return -ENOMEM;
}

void start_cpu_work(void)
{
        int i;

        work_enabled = 1;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

                /*
                 * Spread the work by 1 jiffy per cpu so they don't all
                 * fire at once.
                 */
                schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
        }
}

void end_cpu_work(void)
{
        work_enabled = 0;
}

void flush_cpu_work(void)
{
        int i;

        for_each_online_cpu(i) {
                struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

                /* these works are per-cpu, no need for flush_sync */
                flush_delayed_work(&b->work);
        }
}
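
/*
 * Shutdown ordering (a sketch derived from the two functions above):
 * clear work_enabled via end_cpu_work() so the workers stop re-arming
 * themselves, then wait for any still-pending per-CPU sync to finish:
 *
 *      end_cpu_work();
 *      flush_cpu_work();
 */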

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct entry can be uninitialized. The function reserves a
 * data array that is specified by size. Use
 * op_cpu_buffer_write_commit() after preparing the sample. In case of
 * errors a NULL pointer is returned, otherwise a pointer to the
 * sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
        entry->event = ring_buffer_lock_reserve
                (op_ring_buffer, sizeof(struct op_sample) +
                 size * sizeof(entry->sample->data[0]));
        if (!entry->event)
                return NULL;
        entry->sample = ring_buffer_event_data(entry->event);
        entry->size = size;
        entry->data = entry->sample->data;

        return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
        return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}
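
/*
 * Typical use of the reserve/commit pair (an illustrative sketch; see
 * op_add_sample() below for a real caller):
 *
 *      struct op_entry entry;
 *      struct op_sample *sample;
 *
 *      sample = op_cpu_buffer_write_reserve(&entry, 0);
 *      if (!sample)
 *              return -ENOMEM;
 *      sample->eip = pc;
 *      sample->event = event;
 *      return op_cpu_buffer_write_commit(&entry);
 */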

struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
        struct ring_buffer_event *e;
        e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
        if (!e)
                return NULL;

        entry->event = e;
        entry->sample = ring_buffer_event_data(e);
        entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
                / sizeof(entry->sample->data[0]);
        entry->data = entry->sample->data;
        return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
        return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}

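/*
 * op_add_code() emits a control record when the sampled context
 * changes: eip is set to ESCAPE_CODE, the event field carries the
 * TRACE_BEGIN/KERNEL_CTX_SWITCH/IS_KERNEL/USER_CTX_SWITCH flags, and a
 * task switch additionally stores the new task pointer as one data
 * word. If nothing changed, no record is written.
 */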
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
            int is_kernel, struct task_struct *task)
{
        struct op_entry entry;
        struct op_sample *sample;
        unsigned long flags;
        int size;

        flags = 0;

        if (backtrace)
                flags |= TRACE_BEGIN;

        /* notice a switch from user->kernel or vice versa */
        is_kernel = !!is_kernel;
        if (cpu_buf->last_is_kernel != is_kernel) {
                cpu_buf->last_is_kernel = is_kernel;
                flags |= KERNEL_CTX_SWITCH;
                if (is_kernel)
                        flags |= IS_KERNEL;
        }

        /* notice a task switch */
        if (cpu_buf->last_task != task) {
                cpu_buf->last_task = task;
                flags |= USER_CTX_SWITCH;
        }

        if (!flags)
                /* nothing to do */
                return 0;

        if (flags & USER_CTX_SWITCH)
                size = 1;
        else
                size = 0;

        sample = op_cpu_buffer_write_reserve(&entry, size);
        if (!sample)
                return -ENOMEM;

        sample->eip = ESCAPE_CODE;
        sample->event = flags;

        if (size)
                op_cpu_buffer_add_data(&entry, (unsigned long)task);

        op_cpu_buffer_write_commit(&entry);

        return 0;
}

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
              unsigned long pc, unsigned long event)
{
        struct op_entry entry;
        struct op_sample *sample;

        sample = op_cpu_buffer_write_reserve(&entry, 0);
        if (!sample)
                return -ENOMEM;

        sample->eip = pc;
        sample->event = event;

        return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
           unsigned long backtrace, int is_kernel, unsigned long event,
           struct task_struct *task)
{
        struct task_struct *tsk = task ? task : current;
        cpu_buf->sample_received++;

        if (pc == ESCAPE_CODE) {
                cpu_buf->sample_invalid_eip++;
                return 0;
        }

        if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
                goto fail;

        if (op_add_sample(cpu_buf, pc, event))
                goto fail;

        return 1;

fail:
        cpu_buf->sample_lost_overflow++;
        return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
        cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                          unsigned long event, int is_kernel,
                          struct task_struct *task)
{
        struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
        unsigned long backtrace = oprofile_backtrace_depth;

        /*
         * if log_sample() fails we can't backtrace since we lost the
         * source of this event
         */
        if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task))
                /* failed */
                return;

        if (!backtrace)
                return;

        oprofile_begin_trace(cpu_buf);
        oprofile_ops.backtrace(regs, backtrace);
        oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
                                unsigned long event, int is_kernel,
                                struct task_struct *task)
{
        __oprofile_add_ext_sample(pc, regs, event, is_kernel, task);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                             unsigned long event, int is_kernel)
{
        __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
        int is_kernel;
        unsigned long pc;

        if (likely(regs)) {
                is_kernel = !user_mode(regs);
                pc = profile_pc(regs);
        } else {
                is_kernel = 0;    /* This value will not be used */
                pc = ESCAPE_CODE; /* as this causes an early return. */
        }

        __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
                       unsigned long pc, int code, int size)
{
        struct op_sample *sample;
        int is_kernel = !user_mode(regs);
        struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

        cpu_buf->sample_received++;

        /* no backtraces for samples with data */
        if (op_add_code(cpu_buf, 0, is_kernel, current))
                goto fail;

        sample = op_cpu_buffer_write_reserve(entry, size + 2);
        if (!sample)
                goto fail;
        sample->eip = ESCAPE_CODE;
        sample->event = 0; /* no flags */

        op_cpu_buffer_add_data(entry, code);
        op_cpu_buffer_add_data(entry, pc);

        return;

fail:
        entry->event = NULL;
        cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
        if (!entry->event)
                return 0;
        return op_cpu_buffer_add_data(entry, val);
}

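/*
 * The 64-bit value below is stored as two 32-bit data words, low word
 * first; a consumer has to reassemble it as low | ((u64)high << 32).
 */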
int oprofile_add_data64(struct op_entry *entry, u64 val)
{
        if (!entry->event)
                return 0;
        if (op_cpu_buffer_get_size(entry) < 2)
                /*
                 * the function returns 0 to indicate a too small
                 * buffer, even if there is some space left
                 */
                return 0;
        if (!op_cpu_buffer_add_data(entry, (u32)val))
                return 0;
        return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}

int oprofile_write_commit(struct op_entry *entry)
{
        if (!entry->event)
                return -EINVAL;
        return op_cpu_buffer_write_commit(entry);
}
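
/*
 * Putting the three calls above together (an illustrative sketch of how
 * a model driver might emit one sample carrying two extra data words;
 * regs, pc, code, val0 and val1 are placeholders):
 *
 *      struct op_entry entry;
 *
 *      oprofile_write_reserve(&entry, regs, pc, code, 2);
 *      oprofile_add_data(&entry, val0);
 *      oprofile_add_data(&entry, val1);
 *      oprofile_write_commit(&entry);
 *
 * If the reservation fails, entry->event is NULL and the subsequent
 * calls degrade to no-ops (returning 0 and -EINVAL respectively).
 */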

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
        struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
        log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
}

void oprofile_add_trace(unsigned long pc)
{
        struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);

        if (!cpu_buf->tracing)
                return;

        /*
         * broken frame can give an eip with the same value as an
         * escape code, abort the trace if we get it
         */
        if (pc == ESCAPE_CODE)
                goto fail;

        if (op_add_sample(cpu_buf, pc, 0))
                goto fail;

        return;
fail:
        cpu_buf->tracing = 0;
        cpu_buf->backtrace_aborted++;
        return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
        struct oprofile_cpu_buffer *b =
                container_of(work, struct oprofile_cpu_buffer, work.work);
        if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) {
                cancel_delayed_work(&b->work);
                return;
        }
        sync_buffer(b->cpu);

        /* don't re-add the work if we're shutting down */
        if (work_enabled)
                schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}