#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 14
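
/*
 * Illustrative sketch (not part of this file, assuming <stdio.h> and
 * <stdlib.h>): a tool reading /proc/schedstat is expected to check this
 * version line first and adapt or abort on a mismatch, e.g.:
 *
 *	char line[128];
 *	int ver = 0;
 *	FILE *f = fopen("/proc/schedstat", "r");
 *
 *	if (!f || !fgets(line, sizeof(line), f) ||
 *	    sscanf(line, "version %d", &ver) != 1 || ver != 14)
 *		exit(EXIT_FAILURE);
 */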

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_both_empty,
		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_sched_info.cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;
			char mask_str[NR_CPUS];

			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
			    " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		preempt_enable();
#endif
	}
	return 0;
}
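
/*
 * A minimal userspace sketch of parsing one per-cpu line emitted above
 * (assumption: the version 14 field order produced by show_schedstat();
 * the variable names are illustrative, not kernel identifiers):
 *
 *	int cpu;
 *	unsigned int yld_both_empty, yld_act_empty, yld_exp_empty,
 *		     yld_count, sched_switch, sched_count, sched_goidle,
 *		     ttwu_count, ttwu_local;
 *	unsigned long long cpu_time, run_delay;
 *	unsigned long pcount;
 *
 *	if (sscanf(line, "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
 *		   &cpu, &yld_both_empty, &yld_act_empty, &yld_exp_empty,
 *		   &yld_count, &sched_switch, &sched_count, &sched_goidle,
 *		   &ttwu_count, &ttwu_local, &cpu_time, &run_delay,
 *		   &pcount) == 13)
 *		handle any following "domainN <mask> ..." lines for this cpu
 */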

static int schedstat_open(struct inode *inode, struct file *file)
{
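	/*
	 * Size the seq_file buffer up front: one page, plus one extra
	 * page per 32 online CPUs, which avoids seq_file's grow-and-retry
	 * behaviour for large CPU counts. Worked example (illustrative):
	 * with 4096-byte pages and 64 online CPUs, size is
	 * 4096 * (1 + 64/32) = 12288 bytes.
	 */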
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
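
/*
 * These fops are wired into procfs elsewhere in the kernel; a minimal
 * sketch of such a registration with the procfs API of this era
 * (placement and error handling here are assumptions for illustration):
 *
 *	struct proc_dir_entry *pe;
 *
 *	pe = create_proc_entry("schedstat", 0, NULL);
 *	if (pe)
 *		pe->proc_fops = &proc_schedstat_operations;
 */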

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.cpu_time += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
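
/*
 * Illustrative call sites (the exact fields vary by caller): these
 * macros compile to real updates only when CONFIG_SCHEDSTATS is set
 * and to empty statements otherwise, so callers need no #ifdefs:
 *
 *	schedstat_inc(rq, ttwu_count);
 *	schedstat_add(sd, lb_imbalance[itype], imbalance);
 *	schedstat_set(se->wait_max, max(se->wait_max, delta));
 */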
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
/*
 * Called when a process is dequeued from the active array and given
 * the cpu. We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue. (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * This function is only called from sched_info_arrive(), rather than
 * dequeue_task(). Even though a task may be queued and dequeued multiple
 * times as it is shuffled about, we're really interested in knowing how
 * long it was from the *first* time it was queued to the time that it
 * finally hit a cpu.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}
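
/*
 * Worked example with made-up clock values: a task is queued at
 * rq->clock == 1000000 and first gets the cpu at rq->clock == 1300000.
 * Then delta == 300000, run_delay grows by 300000, pcount is bumped,
 * and last_arrival records 1300000 for sched_info_depart() below.
 */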

/*
 * Called when a process is queued into either the active or expired
 * array. The time is noted and later used to determine how long the
 * task had to wait before it reached the cpu. Since the expired queue
 * will become the active queue after the active queue is empty, without
 * dequeuing and requeuing any tasks, we are interested in queuing to
 * either. It is unusual but not impossible for tasks to be dequeued
 * and immediately requeued in the same or another array: this can happen
 * in sched_yield(), set_user_nice(), and even load_balance() as it moves
 * tasks from runqueue to runqueue.
 *
 * This function is only called from enqueue_task(), but it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}
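
/*
 * Illustrative consequence of the "only if not already set" rule: when
 * sched_yield() dequeues and immediately requeues a task, last_queued
 * is still set from the first enqueue, so the measured wait spans the
 * whole time since that first enqueue rather than restarting.
 */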

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily. Now we can calculate how long we ran.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	t->sched_info.cpu_time += delta;
	rq_sched_info_depart(task_rq(t), delta);
}
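
/*
 * Continuing the made-up numbers above: if the task leaves the cpu at
 * rq->clock == 1450000, delta == 1450000 - 1300000 == 150000, which is
 * charged to both t->sched_info.cpu_time and, via rq_sched_info_depart(),
 * the per-runqueue aggregate reported in /proc/schedstat.
 */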

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
#else
#define sched_info_queued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */