// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)

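/*
 * Illustrative usage (not part of the original source): callers below pass
 * either a seq_file (debugfs/procfs reads) or NULL (the sysrq path), e.g.:
 *
 *	SEQ_printf(m, "cpu#%d\n", cpu);     // seq_printf() into the seq_file
 *	SEQ_printf(NULL, "cpu#%d\n", cpu);  // pr_cont() to the console
 */
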
/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

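/*
 * Illustrative usage (not part of the original source): SPLIT_NS() expands
 * to two arguments, so a "%Ld.%06ld" format prints a nanosecond value as
 * milliseconds with six fractional digits, e.g.:
 *
 *	SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(123456789));  // "123.456789"
 */
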
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

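/*
 * Illustrative usage (not part of the original source): feature bits are
 * read and toggled through this file once sched_init_debug() has created
 * it; a "NO_" prefix clears the named bit. Names come from features.h
 * (GENTLE_FAIR_SLEEPERS is just an example):
 *
 *	# cat /sys/kernel/debug/sched/features
 *	# echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 */
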
#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

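/*
 * Illustrative usage (not part of the original source): with
 * CONFIG_PREEMPT_DYNAMIC the preemption model can be read and switched at
 * runtime through this file; the current mode is shown in parentheses:
 *
 *	# cat /sys/kernel/debug/sched/preempt
 *	none voluntary (full)
 *	# echo voluntary > /sys/kernel/debug/sched/preempt
 */
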
__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);

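/*
 * Resulting layout (sketch, assuming debugfs is mounted at the usual
 * /sys/kernel/debug; entries vary with the kconfig options above):
 *
 *	/sys/kernel/debug/sched/
 *		debug, features, verbose, preempt
 *		latency_ns, min_granularity_ns, wakeup_granularity_ns, ...
 *		domains/		(CONFIG_SMP, see update_sched_domain_debugfs())
 *		numa_balancing/		(CONFIG_NUMA_BALANCING)
 */
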
#ifdef CONFIG_SMP

static cpumask_var_t		sd_sysctl_cpus;
static struct dentry		*sd_dentry;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_debug_init() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

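/*
 * Resulting hierarchy (sketch): one directory per CPU, one per sched-domain
 * level, each populated by register_sd() above, e.g.:
 *
 *	/sys/kernel/debug/sched/domains/cpu0/domain0/{min_interval,flags,name,...}
 *	/sys/kernel/debug/sched/domains/cpu0/domain1/...
 */
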
void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_rq_lock_irqsave(rq, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
			cfs_rq->idle_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_idle_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

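/*
 * Worked example (illustrative): with CPUs 0..3 all online, *offset == 0
 * yields the header token (void *)1, *offset == 1 yields (void *)2 (CPU 0),
 * and in general *offset == n + 1 yields (void *)(n + 2) for online CPU n;
 * iteration ends once cpumask_next() runs past nr_cpu_ids.
 */
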
#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

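/*
 * Illustrative usage (not part of the original source): this function backs
 * /proc/<pid>/sched; the sample values below are made up:
 *
 *	$ cat /proc/1/sched
 *	systemd (1, #threads: 1)
 *	-------------------------------------------------------------------
 *	se.exec_start                                :     123456.789012
 *	...
 */
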
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}
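
/*
 * Note (added for clarity): writing to /proc/<pid>/sched ends up here and
 * clears only the accumulated CONFIG_SCHEDSTATS counters in p->stats; the
 * sched_entity fields printed above are left untouched.
 */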

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}