Merge branch 'proc-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/adobriyan...
[linux-2.6-block.git] / kernel / latencytop.c
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

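/*
 * latency_lock protects both the global latency_record[] table below and
 * the per-task latency_record arrays hanging off each task_struct.
 */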
static DEFINE_SPINLOCK(latency_lock);

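/*
 * Global table of the worst latency causes observed so far, one entry
 * per unique backtrace.
 */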
#define MAXLR 128
static struct latency_record latency_record[MAXLR];

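/* Nonzero when latency tracking is enabled; toggled from outside this file. */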
int latencytop_enabled;

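/*
 * Reset the per-task latency records of @p. Runs under latency_lock so a
 * concurrent account_scheduler_latency() cannot see a half-cleared array.
 */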
void clear_all_latency_tracing(struct task_struct *p)
{
        unsigned long flags;

        if (!latencytop_enabled)
                return;

        spin_lock_irqsave(&latency_lock, flags);
        memset(&p->latency_record, 0, sizeof(p->latency_record));
        p->latency_record_count = 0;
        spin_unlock_irqrestore(&latency_lock, flags);
}

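/* Reset the global table; triggered by any write to /proc/latency_stats. */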
static void clear_global_latency_tracing(void)
{
        unsigned long flags;

        spin_lock_irqsave(&latency_lock, flags);
        memset(&latency_record, 0, sizeof(latency_record));
        spin_unlock_irqrestore(&latency_lock, flags);
}

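/*
 * Fold one latency record into the global table: if an entry with the
 * same backtrace already exists, merge into it; otherwise take the first
 * free slot. Caller must hold latency_lock.
 */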
static void __sched
account_global_scheduler_latency(struct task_struct *tsk, struct latency_record *lat)
{
        int firstnonnull = MAXLR + 1;
        int i;

        if (!latencytop_enabled)
                return;

        /* skip kernel threads for now */
        if (!tsk->mm)
                return;

        for (i = 0; i < MAXLR; i++) {
                int q, same = 1;

                /* Nothing stored: */
                if (!latency_record[i].backtrace[0]) {
                        if (firstnonnull > i)
                                firstnonnull = i;
                        continue;
                }
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat->backtrace[q];

                        if (latency_record[i].backtrace[q] != record) {
                                same = 0;
                                break;
                        }

                        /* 0 and ULONG_MAX entries mean end of backtrace: */
                        if (record == 0 || record == ULONG_MAX)
                                break;
                }
                if (same) {
                        latency_record[i].count++;
                        latency_record[i].time += lat->time;
                        if (lat->time > latency_record[i].max)
                                latency_record[i].max = lat->time;
                        return;
                }
        }

        i = firstnonnull;
        if (i >= MAXLR - 1)
                return;

        /* Allocated a new one: */
        memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

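/* Capture @tsk's current kernel stack into @lat's backtrace buffer. */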
static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat)
{
        struct stack_trace trace;

        memset(&trace, 0, sizeof(trace));
        trace.max_entries = LT_BACKTRACEDEPTH;
        trace.entries = &lat->backtrace[0];
        trace.skip = 0;
        save_stack_trace_tsk(tsk, &trace);
}

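/*
 * account_scheduler_latency - record a latency hit for @tsk
 * @tsk:   the task that incurred the latency
 * @usecs: length of the wait in microseconds
 * @inter: 1 if the task slept interruptibly, 0 if uninterruptibly
 *
 * Snapshots the backtrace of the wait and accounts it both in the global
 * table and in @tsk's own latency records.
 */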
void __sched
account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
        unsigned long flags;
        int i, q;
        struct latency_record lat;

        if (!latencytop_enabled)
                return;

        /* Long interruptible waits are generally user requested... */
        if (inter && usecs > 5000)
                return;

        memset(&lat, 0, sizeof(lat));
        lat.count = 1;
        lat.time = usecs;
        lat.max = usecs;
        store_stacktrace(tsk, &lat);

        spin_lock_irqsave(&latency_lock, flags);

        account_global_scheduler_latency(tsk, &lat);

        /*
         * Short term hack: once a task has accumulated LT_SAVECOUNT
         * records we stop accounting for it; in the future old records
         * should be recycled instead.
         */
        tsk->latency_record_count++;
        if (tsk->latency_record_count >= LT_SAVECOUNT)
                goto out_unlock;

        for (i = 0; i < LT_SAVECOUNT; i++) {
                struct latency_record *mylat;
                int same = 1;

                mylat = &tsk->latency_record[i];
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat.backtrace[q];

                        if (mylat->backtrace[q] != record) {
                                same = 0;
                                break;
                        }

                        /* 0 and ULONG_MAX entries mean end of backtrace: */
                        if (record == 0 || record == ULONG_MAX)
                                break;
                }
                if (same) {
                        mylat->count++;
                        mylat->time += lat.time;
                        if (lat.time > mylat->max)
                                mylat->max = lat.time;
                        goto out_unlock;
                }
        }

        /* Allocated a new one: */
        i = tsk->latency_record_count;
        memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
        spin_unlock_irqrestore(&latency_lock, flags);
}

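/*
 * Emit the global table through /proc/latency_stats: one line per record
 * with the hit count, total and maximum latency, followed by the symbol
 * names of the backtrace (offsets stripped).
 */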
static int lstats_show(struct seq_file *m, void *v)
{
        int i;

        seq_puts(m, "Latency Top version : v0.1\n");

        for (i = 0; i < MAXLR; i++) {
                if (latency_record[i].backtrace[0]) {
                        int q;

                        seq_printf(m, "%i %li %li ",
                                   latency_record[i].count,
                                   latency_record[i].time,
                                   latency_record[i].max);
                        for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                                char sym[KSYM_SYMBOL_LEN];
                                char *c;

                                if (!latency_record[i].backtrace[q])
                                        break;
                                if (latency_record[i].backtrace[q] == ULONG_MAX)
                                        break;
                                sprint_symbol(sym, latency_record[i].backtrace[q]);
                                c = strchr(sym, '+');
                                if (c)
                                        *c = 0;
                                seq_printf(m, "%s ", sym);
                        }
                        seq_printf(m, "\n");
                }
        }
        return 0;
}

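/* Writing anything to /proc/latency_stats clears the global table. */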
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
             loff_t *offs)
{
        clear_global_latency_tracing();

        return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, lstats_show, NULL);
}

static struct file_operations lstats_fops = {
        .open    = lstats_open,
        .read    = seq_read,
        .write   = lstats_write,
        .llseek  = seq_lseek,
        .release = single_release,
};

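/* Create /proc/latency_stats at boot. */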
static int __init init_lstats_procfs(void)
{
        proc_create("latency_stats", 0644, NULL, &lstats_fops);
        return 0;
}
__initcall(init_lstats_procfs);