// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */

/*
 * This allows printing both to /sys/kernel/debug/sched/debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
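/*
 * Note: callers that want console output (e.g. the sysrq path below) pass
 * a NULL seq_file, so SEQ_printf(NULL, "cpu#%d\n", cpu) falls through to
 * pr_cont().
 */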

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
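/*
 * Example: with the "%Ld.%06ld" format used throughout this file,
 * SPLIT_NS(1234567890) prints as "1234.567890" (the nsec value split
 * into a millisecond part and the six-digit remainder).
 */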

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
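/*
 * The SCHED_FEAT() definition above turns each entry of features.h into a
 * string literal; e.g. an entry such as SCHED_FEAT(TTWU_QUEUE, true)
 * contributes "TTWU_QUEUE" , to the array.
 */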

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}
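/*
 * Reading /sys/kernel/debug/sched/features thus yields one line such as
 * "PLACE_LAG RUN_TO_PARITY NO_TTWU_QUEUE ..." where a NO_ prefix marks a
 * disabled feature (feature names here are illustrative).
 */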

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
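/*
 * Usage sketch (feature name illustrative):
 *   echo NO_TTWU_QUEUE > /sys/kernel/debug/sched/features
 * clears the feature bit and patches the corresponding static key out.
 */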

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
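/*
 * Valid values map onto sched_tunable_scaling_names[] further down:
 * 0 = none, 1 = logarithmic, 2 = linear.
 */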

#endif /* SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
	int j;

	/* Count entries in NULL terminated preempt_modes */
	for (j = 0; preempt_modes[j]; j++)
		;
	j -= !IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY);

	for (; i < j; i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}
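/*
 * Example output (non-RT kernel, arch without PREEMPT_LAZY):
 *   none voluntary (full)
 * with the currently selected preemption mode in parentheses.
 */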

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

#ifdef CONFIG_SMP
static struct dentry *sd_dentry;


static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	ssize_t result;
	bool orig;

	cpus_read_lock();
	sched_domains_mutex_lock();

	orig = sched_debug_verbose;
	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);

	if (sched_debug_verbose && !orig)
		update_sched_domain_debugfs();
	else if (!sched_debug_verbose && orig) {
		debugfs_remove(sd_dentry);
		sd_dentry = NULL;
	}

	sched_domains_mutex_unlock();
	cpus_read_unlock();

	return result;
}
#else
#define sched_verbose_write debugfs_write_file_bool
#endif

static const struct file_operations sched_verbose_fops = {
	.read =		debugfs_read_file_bool,
	.write =	sched_verbose_write,
	.open =		simple_open,
	.llseek =	default_llseek,
};
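/*
 * Writing "1" to /sys/kernel/debug/sched/verbose therefore populates the
 * per-CPU "domains" hierarchy; writing "0" tears it down again.
 */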

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum dl_param {
	DL_RUNTIME = 0,
	DL_PERIOD,
};

static unsigned long fair_server_period_max = (1UL << 22) * NSEC_PER_USEC; /* ~4 seconds */
static unsigned long fair_server_period_min = (100) * NSEC_PER_USEC;     /* 100 us */

static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubuf,
				       size_t cnt, loff_t *ppos, enum dl_param param)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);
	u64 runtime, period;
	size_t err;
	int retval;
	u64 value;

	err = kstrtoull_from_user(ubuf, cnt, 10, &value);
	if (err)
		return err;

	scoped_guard (rq_lock_irqsave, rq) {
		runtime = rq->fair_server.dl_runtime;
		period = rq->fair_server.dl_period;

		switch (param) {
		case DL_RUNTIME:
			if (runtime == value)
				break;
			runtime = value;
			break;
		case DL_PERIOD:
			if (value == period)
				break;
			period = value;
			break;
		}

		if (runtime > period ||
		    period > fair_server_period_max ||
		    period < fair_server_period_min) {
			return -EINVAL;
		}

		if (rq->cfs.h_nr_queued) {
			update_rq_clock(rq);
			dl_server_stop(&rq->fair_server);
		}

		retval = dl_server_apply_params(&rq->fair_server, runtime, period, 0);
		if (retval)
			cnt = retval;

		if (!runtime)
			printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n",
					cpu_of(rq));

		if (rq->cfs.h_nr_queued)
			dl_server_start(&rq->fair_server);
	}

	*ppos += cnt;
	return cnt;
}
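/*
 * Values are parsed as plain base-10 nanoseconds; e.g. writing "50000000"
 * to a CPU's fair_server runtime file requests a 50 ms reservation,
 * subject to the runtime <= period and period-bound checks above.
 */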

static size_t sched_fair_server_show(struct seq_file *m, void *v, enum dl_param param)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);
	u64 value;

	switch (param) {
	case DL_RUNTIME:
		value = rq->fair_server.dl_runtime;
		break;
	case DL_PERIOD:
		value = rq->fair_server.dl_period;
		break;
	}

	seq_printf(m, "%llu\n", value);
	return 0;
}

static ssize_t
sched_fair_server_runtime_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_RUNTIME);
}

static int sched_fair_server_runtime_show(struct seq_file *m, void *v)
{
	return sched_fair_server_show(m, v, DL_RUNTIME);
}

static int sched_fair_server_runtime_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_runtime_show, inode->i_private);
}

static const struct file_operations fair_server_runtime_fops = {
	.open		= sched_fair_server_runtime_open,
	.write		= sched_fair_server_runtime_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t
sched_fair_server_period_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_PERIOD);
}

static int sched_fair_server_period_show(struct seq_file *m, void *v)
{
	return sched_fair_server_show(m, v, DL_PERIOD);
}

static int sched_fair_server_period_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_period_show, inode->i_private);
}

static const struct file_operations fair_server_period_fops = {
	.open		= sched_fair_server_period_open,
	.write		= sched_fair_server_period_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *debugfs_sched;

static void debugfs_fair_server_init(void)
{
	struct dentry *d_fair;
	unsigned long cpu;

	d_fair = debugfs_create_dir("fair_server", debugfs_sched);
	if (!d_fair)
		return;

	for_each_possible_cpu(cpu) {
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%lu", cpu);
		d_cpu = debugfs_create_dir(buf, d_fair);

		debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops);
		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
	}
}
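/*
 * Resulting layout, one directory per possible CPU:
 *   /sys/kernel/debug/sched/fair_server/cpu0/{runtime,period}
 */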

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	sched_domains_mutex_lock();
	update_sched_domain_debugfs();
	sched_domains_mutex_unlock();
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	debugfs_fair_server_init();

	return 0;
}
late_initcall(sched_init_debug);
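/*
 * Files created above, all under /sys/kernel/debug/sched/:
 * features, verbose, preempt (PREEMPT_DYNAMIC), base_slice_ns,
 * latency_warn_ms, latency_warn_once, tunable_scaling, migration_cost_ns,
 * nr_migrate (SMP), numa_balancing/ (NUMA_BALANCING), debug and
 * fair_server/; the domains/ directory appears only while verbose is set.
 */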

#ifdef CONFIG_SMP

static cpumask_var_t sd_sysctl_cpus;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
	debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
	debugfs_create_u32("level", 0444, parent, (u32 *)&sd->level);

	if (sd->flags & SD_ASYM_PACKING)
		debugfs_create_u32("group_asym_prefer_cpu", 0444, parent,
				   (u32 *)&sd->groups->asym_prefer_cpu);
}
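/*
 * SDM() stamps out one debugfs file per tunable; e.g.
 * SDM(u32, 0644, busy_factor) expands to
 * debugfs_create_u32("busy_factor", 0644, parent, &sd->busy_factor).
 */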

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_debug_init() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!sched_debug_verbose)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry) {
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

		/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
		if (cpumask_empty(sd_sysctl_cpus))
			cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
	}

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n", \
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
		   p->comm, task_pid_nr(p),
		   SPLIT_NS(p->se.vruntime),
		   entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
		   SPLIT_NS(p->se.deadline),
		   p->se.custom_slice ? 'S' : ' ',
		   SPLIT_NS(p->se.slice),
		   SPLIT_NS(p->se.sum_exec_runtime),
		   (long long)(p->nvcsw + p->nivcsw),
		   p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld",
		   SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		   SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		   SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID       vruntime   eligible    "
		   "deadline             slice          sum-exec      switches  "
		   "prio         wait-time        sum-sleep       sum-block"
#ifdef CONFIG_NUMA_BALANCING
		   "  node   group-id"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "  group-path"
#endif
		   "\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------"
		   "------------------------------------------------------"
#ifdef CONFIG_NUMA_BALANCING
		   "--------------"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "--------------"
#endif
		   "\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, left_deadline = -1, spread;
	struct sched_entity *last, *first, *root;
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif

	raw_spin_rq_lock_irqsave(rq, flags);
	root = __pick_root_entity(cfs_rq);
	if (root)
		left_vruntime = root->min_vruntime;
	first = __pick_first_entity(cfs_rq);
	if (first)
		left_deadline = first->deadline;
	last = __pick_last_entity(cfs_rq);
	if (last)
		right_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);

	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_deadline",
			SPLIT_NS(left_deadline));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_vruntime",
			SPLIT_NS(left_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "avg_vruntime",
			SPLIT_NS(avg_vruntime(cfs_rq)));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "right_vruntime",
			SPLIT_NS(right_vruntime));
	spread = right_vruntime - left_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est",
			cfs_rq->avg.util_est);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);

#ifdef CONFIG_RT_GROUP_SCHED
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);
#endif

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %d\n", #x, (int)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		   init_utsname()->release,
		   (int)strcspn(init_utsname()->version, " "),
		   init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_base_slice);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		   "sysctl_sched_tunable_scaling",
		   sysctl_sched_tunable_scaling,
		   sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
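/*
 * Example mapping: *offset == 0 yields the header token (1); CPU n is
 * returned as n + 2, so that sched_debug_show()'s "(unsigned long)(v - 2)"
 * recovers the CPU number, or -1 for the header.
 */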

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)
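/*
 * e.g. P(prio) prints a line roughly like
 *   "prio                                         :                  120"
 * (stringized field name left-justified to 45 columns, then the value).
 */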


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
			  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
		   get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
#ifdef CONFIG_NUMA_BALANCING
		P_SCHEDSTAT(numa_task_migrated);
		P_SCHEDSTAT(numa_task_swapped);
#endif
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	} else if (fair_policy(p->policy)) {
		P(se.slice);
	}
#ifdef CONFIG_SCHED_CLASS_EXT
	__PS("ext.enabled", task_on_scx(p));
#endif
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
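	/* Rate-limited: at most one warning per hour (interval 60*60*HZ, burst 1). */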
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	if (likely(!__ratelimit(&latency_check_ratelimit)))
		return;

	pr_err("sched: CPU %d need_resched set for > %llu ns (%d ticks) without schedule\n",
	       cpu, latency, cpu_rq(cpu)->ticks_without_resched);
	dump_stack();
}