/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */
21 | ||
22 | #include <linux/mm.h> | |
23 | #include <linux/workqueue.h> | |
24 | #include <linux/notifier.h> | |
25 | #include <linux/dcookies.h> | |
26 | #include <linux/profile.h> | |
27 | #include <linux/module.h> | |
28 | #include <linux/fs.h> | |
e8edc6e0 | 29 | #include <linux/sched.h> |
1da177e4 LT |
30 | |
31 | #include "oprofile_stats.h" | |
32 | #include "event_buffer.h" | |
33 | #include "cpu_buffer.h" | |
34 | #include "buffer_sync.h" | |
35 | ||
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);


/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
{
        unsigned long flags;
        struct task_struct * task = data;
        spin_lock_irqsave(&task_mortuary, flags);
        list_add(&task->tasks, &dying_tasks);
        spin_unlock_irqrestore(&task_mortuary, flags);
        return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
{
        /* To avoid latency problems, we only process the current CPU,
         * hoping that most samples for the task are on this CPU
         */
        sync_buffer(raw_smp_processor_id());
        return 0;
}
72 | ||
73 | ||
74 | /* The task is about to try a do_munmap(). We peek at what it's going to | |
75 | * do, and if it's an executable region, process the samples first, so | |
76 | * we don't lose any. This does not have to be exact, it's a QoI issue | |
77 | * only. | |
78 | */ | |
79 | static int munmap_notify(struct notifier_block * self, unsigned long val, void * data) | |
80 | { | |
81 | unsigned long addr = (unsigned long)data; | |
82 | struct mm_struct * mm = current->mm; | |
83 | struct vm_area_struct * mpnt; | |
84 | ||
85 | down_read(&mm->mmap_sem); | |
86 | ||
87 | mpnt = find_vma(mm, addr); | |
88 | if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) { | |
89 | up_read(&mm->mmap_sem); | |
90 | /* To avoid latency problems, we only process the current CPU, | |
91 | * hoping that most samples for the task are on this CPU | |
92 | */ | |
39c715b7 | 93 | sync_buffer(raw_smp_processor_id()); |
1da177e4 LT |
94 | return 0; |
95 | } | |
96 | ||
97 | up_read(&mm->mmap_sem); | |
98 | return 0; | |
99 | } | |
100 | ||
101 | ||
102 | /* We need to be told about new modules so we don't attribute to a previously | |
103 | * loaded module, or drop the samples on the floor. | |
104 | */ | |
105 | static int module_load_notify(struct notifier_block * self, unsigned long val, void * data) | |
106 | { | |
107 | #ifdef CONFIG_MODULES | |
108 | if (val != MODULE_STATE_COMING) | |
109 | return 0; | |
110 | ||
111 | /* FIXME: should we process all CPU buffers ? */ | |
59cc185a | 112 | mutex_lock(&buffer_mutex); |
1da177e4 LT |
113 | add_event_entry(ESCAPE_CODE); |
114 | add_event_entry(MODULE_LOADED_CODE); | |
59cc185a | 115 | mutex_unlock(&buffer_mutex); |
1da177e4 LT |
116 | #endif |
117 | return 0; | |
118 | } | |
119 | ||
120 | ||
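/* Notifier blocks for the callbacks above; sync_start() hooks them into
 * the task-handoff, profile-event and module notifier chains.
 */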
static struct notifier_block task_free_nb = {
        .notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
        .notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
        .notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
};


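/* Stop the per-CPU work and flush the task mortuary. Running
 * process_task_mortuary() twice walks every task from dying_tasks
 * through dead_tasks to being freed, so nothing is leaked.
 */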
static void end_sync(void)
{
        end_cpu_work();
        /* make sure we don't leak task structs */
        process_task_mortuary();
        process_task_mortuary();
}


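/* Register all the notifiers we need for syncing the CPU buffers and
 * start the per-CPU flushing work. On failure, everything registered
 * so far is unwound again before returning the error.
 */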
int sync_start(void)
{
        int err;

        start_cpu_work();

        err = task_handoff_register(&task_free_nb);
        if (err)
                goto out1;
        err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
        if (err)
                goto out2;
        err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
        if (err)
                goto out3;
        err = register_module_notifier(&module_load_nb);
        if (err)
                goto out4;

out:
        return err;
out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
        task_handoff_unregister(&task_free_nb);
out1:
        end_sync();
        goto out;
}


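/* Tear down what sync_start() set up: unregister the notifiers in
 * reverse order, then drain the remaining work and the mortuary.
 */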
void sync_stop(void)
{
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
        end_sync();
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
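/* The fast path works because a dentry that already has d_cookie set
 * uses its own address as the cookie value (see fs/dcookies.c), so the
 * slower get_dcookie() call can be skipped.
 */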
static inline unsigned long fast_get_dcookie(struct dentry * dentry,
        struct vfsmount * vfsmnt)
{
        unsigned long cookie;

        if (dentry->d_cookie)
                return (unsigned long)dentry;
        get_dcookie(dentry, vfsmnt, &cookie);
        return cookie;
}


/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct * mm)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct * vma;

        if (!mm)
                goto out;

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!vma->vm_file)
                        continue;
                if (!(vma->vm_flags & VM_EXECUTABLE))
                        continue;
                cookie = fast_get_dcookie(vma->vm_file->f_path.dentry,
                        vma->vm_file->f_path.mnt);
                break;
        }

out:
        return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct * vma;

        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

                if (addr < vma->vm_start || addr >= vma->vm_end)
                        continue;

                if (vma->vm_file) {
                        cookie = fast_get_dcookie(vma->vm_file->f_path.dentry,
                                vma->vm_file->f_path.mnt);
                        *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
                                vma->vm_start;
                } else {
                        /* must be an anonymous map */
                        *offset = addr;
                }

                break;
        }

        if (!vma)
                cookie = INVALID_COOKIE;

        return cookie;
}


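/* Cookie of the binary we last emitted a COOKIE_SWITCH record for;
 * lets add_us_sample() avoid emitting a switch for every sample.
 */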
static unsigned long last_cookie = INVALID_COOKIE;

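/* The event buffer is a stream of unsigned long words. A word equal to
 * ESCAPE_CODE starts a control record (CPU switch, kernel/user switch,
 * task switch, cookie switch, trace begin); anything else belongs to an
 * (offset, event) sample pair. The helpers below emit those records.
 */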
static void add_cpu_switch(int i)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CPU_SWITCH_CODE);
        add_event_entry(i);
        last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
        add_event_entry(ESCAPE_CODE);
        if (in_kernel)
                add_event_entry(KERNEL_ENTER_SWITCH_CODE);
        else
                add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_SWITCH_CODE);
        add_event_entry(task->pid);
        add_event_entry(cookie);
        /* Another code for daemon back-compat */
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_TGID_CODE);
        add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(COOKIE_SWITCH_CODE);
        add_event_entry(cookie);
}


static void add_trace_begin(void)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(TRACE_BEGIN_CODE);
}


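/* A plain sample record is two words: the offset within the mapped
 * file (or the raw EIP for kernel and anonymous-map samples), followed
 * by the event value.
 */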
static void add_sample_entry(unsigned long offset, unsigned long event)
{
        add_event_entry(offset);
        add_event_entry(event);
}


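/* Convert a user-space sample's EIP into a (cookie, offset) pair and
 * emit it, preceded by a COOKIE_SWITCH if the binary changed. Returns
 * 0 if no mapping could be found for the address.
 */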
static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
{
        unsigned long cookie;
        off_t offset;

        cookie = lookup_dcookie(mm, s->eip, &offset);

        if (cookie == INVALID_COOKIE) {
                atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                return 0;
        }

        if (cookie != last_cookie) {
                add_cookie_switch(cookie);
                last_cookie = cookie;
        }

        add_sample_entry(offset, s->event);

        return 1;
}


/* Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace.
 */
static int
add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
{
        if (in_kernel) {
                add_sample_entry(s->eip, s->event);
                return 1;
        } else if (mm) {
                return add_us_sample(mm, s);
        } else {
                atomic_inc(&oprofile_stats.sample_lost_no_mm);
        }
        return 0;
}


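/* Drop the mmap_sem and the reference taken by take_tasks_mm(). */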
static void release_mm(struct mm_struct * mm)
{
        if (!mm)
                return;
        up_read(&mm->mmap_sem);
        mmput(mm);
}


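/* Pin the task's mm and take mmap_sem for reading so the VMA list
 * stays stable while we do dcookie lookups for its samples.
 */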
static struct mm_struct * take_tasks_mm(struct task_struct * task)
{
        struct mm_struct * mm = get_task_mm(task);
        if (mm)
                down_read(&mm->mmap_sem);
        return mm;
}


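/* An EIP slot holding ESCAPE_CODE marks a control entry rather than
 * a real sample.
 */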
static inline int is_code(unsigned long val)
{
        return val == ESCAPE_CODE;
}


/* "acquire" as many cpu buffer slots as we can */
static unsigned long get_slots(struct oprofile_cpu_buffer * b)
{
        unsigned long head = b->head_pos;
        unsigned long tail = b->tail_pos;

        /*
         * Subtle. This resets the persistent last_task
         * and in_kernel values used for switching notes.
         * BUT, there is a small window between reading
         * head_pos, and this call, that means samples
         * can appear at the new head position, but not
         * be prefixed with the notes for switching
         * kernel mode or a task switch. This small hole
         * can lead to mis-attribution or samples where
         * we don't know if it's in the kernel or not,
         * at the start of an event buffer.
         */
        cpu_buffer_reset(b);

        if (head >= tail)
                return head - tail;

        return head + (b->buffer_size - tail);
}


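/* Consume one slot: advance the tail position by one, wrapping
 * around at buffer_size.
 */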
static void increment_tail(struct oprofile_cpu_buffer * b)
{
        unsigned long new_tail = b->tail_pos + 1;

        rmb();

        if (new_tail < b->buffer_size)
                b->tail_pos = new_tail;
        else
                b->tail_pos = 0;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
        unsigned long flags;
        LIST_HEAD(local_dead_tasks);
        struct task_struct * task;
        struct task_struct * ttask;

        spin_lock_irqsave(&task_mortuary, flags);

        list_splice_init(&dead_tasks, &local_dead_tasks);
        list_splice_init(&dying_tasks, &dead_tasks);

        spin_unlock_irqrestore(&task_mortuary, flags);

        list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
                list_del(&task->tasks);
                free_task(task);
        }
}


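/* Mark this CPU's buffer as synced. Once every online CPU has been
 * marked, it is safe to advance the task mortuary one step, and the
 * mark set is cleared ready for the next round.
 */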
static void mark_done(int cpu)
{
        int i;

        cpu_set(cpu, marked_cpus);

        for_each_online_cpu(i) {
                if (!cpu_isset(i, marked_cpus))
                        return;
        }

        /* All CPUs have been processed at least once,
         * we can process the mortuary once
         */
        process_task_mortuary();

        cpus_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we would need a fifth state and some special
 * handling in sync_buffer().
 */
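/* sb_bt_ignore:    inside a backtrace whose samples could not be mapped;
 *                  drop the rest of it.
 * sb_buffer_start: start of the buffer, kernel/user state not yet known.
 * sb_bt_start:     a backtrace has begun (CPU_TRACE_BEGIN seen).
 * sb_sample_start: normal sample processing.
 */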
typedef enum {
        sb_bt_ignore = -2,
        sb_buffer_start,
        sb_bt_start,
        sb_sample_start,
} sync_buffer_state;

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
        struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
        struct mm_struct *mm = NULL;
        struct task_struct * new;
        unsigned long cookie = 0;
        int in_kernel = 1;
        unsigned int i;
        sync_buffer_state state = sb_buffer_start;
        unsigned long available;

        mutex_lock(&buffer_mutex);

        add_cpu_switch(cpu);

        /* Remember, only we can modify tail_pos */

        available = get_slots(cpu_buf);

        for (i = 0; i < available; ++i) {
                struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];

                if (is_code(s->eip)) {
                        if (s->event <= CPU_IS_KERNEL) {
                                /* kernel/userspace switch */
                                in_kernel = s->event;
                                if (state == sb_buffer_start)
                                        state = sb_sample_start;
                                add_kernel_ctx_switch(s->event);
                        } else if (s->event == CPU_TRACE_BEGIN) {
                                state = sb_bt_start;
                                add_trace_begin();
                        } else {
                                struct mm_struct * oldmm = mm;

                                /* userspace context switch */
                                new = (struct task_struct *)s->event;

                                release_mm(oldmm);
                                mm = take_tasks_mm(new);
                                if (mm != oldmm)
                                        cookie = get_exec_dcookie(mm);
                                add_user_ctx_switch(new, cookie);
                        }
                } else {
                        if (state >= sb_bt_start &&
                            !add_sample(mm, s, in_kernel)) {
                                if (state == sb_bt_start) {
                                        state = sb_bt_ignore;
                                        atomic_inc(&oprofile_stats.bt_lost_no_mapping);
                                }
                        }
                }

                increment_tail(cpu_buf);
        }
        release_mm(mm);

        mark_done(cpu);

        mutex_unlock(&buffer_mutex);
}