/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/gfp.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;

	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);
	return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU.
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact; it's a
 * quality-of-implementation issue only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU.
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}


/* We need to be told about new modules so we don't attribute samples to a
 * previously loaded module, or drop them on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}


static struct notifier_block task_free_nb = {
	.notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};

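/* Note: free_all_tasks() below calls process_task_mortuary() twice on
 * purpose: each pass frees the dead_tasks list and promotes dying_tasks
 * onto it, so two passes are needed to drain both lists safely.
 */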
static void free_all_tasks(void)
{
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}

int sync_start(void)
{
	int err;

	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
		return -ENOMEM;

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

	start_cpu_work();

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
	free_all_tasks();
out1:
	free_cpumask_var(marked_cpus);
	goto out;
}
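
/* sync_start()/sync_stop() bracket a profiling session: they are called
 * from oprofile_setup() and oprofile_shutdown() in oprof.c, so the
 * notifiers registered above stay live for the whole session.
 */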


void sync_stop(void)
{
	end_cpu_work();
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	barrier();			/* do all of the above first */

	flush_cpu_work();

	free_all_tasks();
	free_cpumask_var(marked_cpus);
}


/* Optimisation. We can manage without taking the dcookie lock
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer).
 */
static inline unsigned long fast_get_dcookie(const struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}
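
/* Illustrative sketch (not part of this file): userspace turns a cookie
 * back into a path via the lookup_dcookie(2) syscall. Assuming glibc's
 * syscall(2) wrapper, the daemon side looks roughly like:
 *
 *	char buf[PATH_MAX];
 *	ssize_t len = syscall(__NR_lookup_dcookie, cookie, buf, sizeof(buf));
 *	if (len >= 0)
 *		printf("%.*s\n", (int)len, buf);
 */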


/* Look up the dcookie for the task's mm->exe_file,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications.
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct file *exe_file;

	if (!mm)
		goto done;

	exe_file = get_mm_exe_file(mm);
	if (!exe_file)
		goto done;

	cookie = fast_get_dcookie(&exe_file->f_path);
	fput(exe_file);
done:
	return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 *
 * The caller must ensure the mm is not NULL (i.e. not a kernel thread).
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;
	up_read(&mm->mmap_sem);

	return cookie;
}
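
/* Worked example with made-up numbers: for a file-backed vma with
 * vm_start = 0x400000 and vm_pgoff = 0x10, a sample at addr = 0x401234
 * gives, with PAGE_SHIFT = 12:
 *
 *	*offset = (0x10 << 12) + 0x401234 - 0x400000 = 0x11234
 *
 * i.e. the file offset of the sampled instruction, which userspace can
 * combine with the cookie's path to resolve a symbol.
 */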

static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}
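
/* For reference, the words the helper above emits into the event buffer,
 * in order:
 *
 *	ESCAPE_CODE, CTX_SWITCH_CODE, pid, cookie,
 *	ESCAPE_CODE, CTX_TGID_CODE, tgid
 *
 * The reader treats each ESCAPE_CODE as the start of a control record.
 */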


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
	unsigned long code, pc, val;
	unsigned long cookie;
	off_t offset;

	if (!op_cpu_buffer_get_data(entry, &code))
		return;
	if (!op_cpu_buffer_get_data(entry, &pc))
		return;
	if (!op_cpu_buffer_get_size(entry))
		return;

	if (mm) {
		cookie = lookup_dcookie(mm, pc, &offset);

		if (cookie == NO_COOKIE)
			offset = pc;
		if (cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = pc;
		}
		if (cookie != last_cookie) {
			add_cookie_switch(cookie);
			last_cookie = cookie;
		}
	} else
		offset = pc;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* Offset from Dcookie */

	while (op_cpu_buffer_get_data(entry, &val))
		add_event_entry(val);
}
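
/* For reference, a successful pass through add_data() emits, after an
 * optional COOKIE_SWITCH record:
 *
 *	ESCAPE_CODE, code, offset, val...
 *
 * where the trailing values are whatever extra words the CPU buffer
 * attached to this entry.
 */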

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}


/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	unsigned long cookie;
	off_t offset;

	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	}

	/* add userspace sample */

	if (!mm) {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
		return 0;
	}

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}


static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	mmput(mm);
}

static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}


static void mark_done(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpumask_test_cpu(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once.
	 */
	process_task_mortuary();

	cpumask_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we would need a fifth state and some special
 * handling in sync_buffer().
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;
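
/* Informally, sync_buffer() below drives these transitions:
 * sb_buffer_start becomes sb_sample_start on the first kernel enter/exit
 * switch; TRACE_BEGIN moves the state to sb_bt_start; and sb_bt_start
 * falls back to sb_bt_ignore when the first sample of a backtrace cannot
 * be mapped. Samples are dropped while the state is below sb_bt_start.
 */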

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * a lookup in task->mm->mmap to convert the EIP into a dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	unsigned long val;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	sync_buffer_state state = sb_buffer_start;
	unsigned int i;
	unsigned long available;
	unsigned long flags;
	struct op_entry entry;
	struct op_sample *sample;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; ++i) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;

		if (is_code(sample->eip)) {
			flags = sample->event;
			if (flags & TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			}
			if (flags & KERNEL_CTX_SWITCH) {
				/* kernel/userspace switch */
				in_kernel = flags & IS_KERNEL;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(flags & IS_KERNEL);
			}
			if (flags & USER_CTX_SWITCH
			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
				new = (struct task_struct *)val;
				oldmm = mm;
				release_mm(oldmm);
				mm = get_task_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
			if (op_cpu_buffer_get_size(&entry))
				add_data(&entry, mm);
			continue;
		}

		if (state < sb_bt_start)
			/* ignore sample */
			continue;

		if (add_sample(mm, sample, in_kernel))
			continue;

		/* ignore backtraces if failed to add a sample */
		if (state == sb_bt_start) {
			state = sb_bt_ignore;
			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
		}
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}

/* This function can be used to add a buffer's worth of data directly to
 * the event buffer. The buffer is assumed to be circular: take the
 * entries from index start and stop at index stop, wrapping at max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}

	mutex_unlock(&buffer_mutex);
}
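
/* Illustrative use (hypothetical caller, not from this file): flushing a
 * private ring of `max` entries whose valid data lies between `head` and
 * `tail`:
 *
 *	oprofile_put_buff(ring, head, tail, max);
 *
 * Entries are copied out in order, wrapping at `max`. Note that
 * oprofile_put_buff() takes buffer_mutex itself, so the caller must not
 * hold it.
 */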