1 /*
2 * Pressure stall information for CPU, memory and IO
4 * Copyright (c) 2018 Facebook, Inc.
5 * Author: Johannes Weiner <hannes@cmpxchg.org>
7 * When CPU, memory and IO are contended, tasks experience delays that
8 * reduce throughput and introduce latencies into the workload. Memory
9 * and IO contention, in addition, can cause a full loss of forward
10 * progress in which the CPU goes idle.
12 * This code aggregates individual task delays into resource pressure
13 * metrics that indicate problems with both workload health and
14 * resource utilization.
18 * The time in which a task can execute on a CPU is our baseline for
19 * productivity. Pressure expresses the amount of time in which this
20 * potential cannot be realized due to resource contention.
22 * This concept of productivity has two components: the workload and
23 * the CPU. To measure the impact of pressure on both, we define two
24 * contention states for a resource: SOME and FULL.
26 * In the SOME state of a given resource, one or more tasks are
27 * delayed on that resource. This affects the workload's ability to
28 * perform work, but the CPU may still be executing other tasks.
30 * In the FULL state of a given resource, all non-idle tasks are
31 * delayed on that resource such that nobody is advancing and the CPU
32 * goes idle. This leaves both workload and CPU unproductive.
34 * (Naturally, the FULL state doesn't exist for the CPU resource.)
36 * SOME = nr_delayed_tasks != 0
37 * FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
39 * The percentage of wallclock time spent in those compound stall
40 * states gives pressure numbers between 0 and 100 for each resource,
41 * where the SOME percentage indicates workload slowdowns and the FULL
42 * percentage indicates reduced CPU utilization:
44 * %SOME = time(SOME) / period
45 * %FULL = time(FULL) / period
49 * The more tasks and available CPUs there are, the more work can be
50 * performed concurrently. This means that the potential that can go
51 * unrealized due to resource contention *also* scales with non-idle
52 * tasks and CPUs.
54 * Consider a scenario where 257 number crunching tasks are trying to
55 * run concurrently on 256 CPUs. If we simply aggregated the task
56 * states, we would have to conclude a CPU SOME pressure number of
57 * 100%, since *somebody* is waiting on a runqueue at all
58 * times. However, that is clearly not the amount of contention the
59 * workload is experiencing: only one out of 256 possible execution
60 * threads will be contended at any given time, or about 0.4%.
62 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
63 * given time *one* of the tasks is delayed due to a lack of memory.
64 * Again, looking purely at the task state would yield a memory FULL
65 * pressure number of 0%, since *somebody* is always making forward
66 * progress. But again this wouldn't capture the amount of execution
67 * potential lost, which is 1 out of 4 CPUs, or 25%.
69 * To calculate wasted potential (pressure) with multiple processors,
70 * we have to base our calculation on the number of non-idle tasks in
71 * conjunction with the number of available CPUs, which is the number
72 * of potential execution threads. SOME then becomes the proportion of
73 * delayed tasks to possible threads, and FULL is the share of possible
74 * threads that are unproductive due to delays:
76 * threads = min(nr_nonidle_tasks, nr_cpus)
77 * SOME = min(nr_delayed_tasks / threads, 1)
78 * FULL = (threads - min(nr_running_tasks, threads)) / threads
80 * For the 257 number crunchers on 256 CPUs, this yields:
82 * threads = min(257, 256)
83 * SOME = min(1 / 256, 1) = 0.4%
84 * FULL = (256 - min(257, 256)) / 256 = 0%
86 * For the 1 out of 4 memory-delayed tasks, this yields:
88 * threads = min(4, 4)
89 * SOME = min(1 / 4, 1) = 25%
90 * FULL = (4 - min(3, 4)) / 4 = 25%
92 * [ Substitute nr_cpus with 1, and you can see that it's a natural
93 * extension of the single-CPU model. ]
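* [ For instance, with nr_cpus = 1 and one task delayed on memory while
*   nothing is running: threads = min(1, 1), SOME = min(1/1, 1) = 100%
*   and FULL = (1 - min(0, 1)) / 1 = 100%, matching the single-CPU
*   definitions of SOME and FULL above. ]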
97 * To assess the precise time spent in each such state, we would have
98 * to freeze the system on task changes and start/stop the state
99 * clocks accordingly. Obviously that doesn't scale in practice.
101 * Because the scheduler aims to distribute the compute load evenly
102 * among the available CPUs, we can track task state locally to each
103 * CPU and, at much lower frequency, extrapolate the global state for
104 * the cumulative stall times and the running averages.
106 * For each runqueue, we track:
108 * tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
109 * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
110 * tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
112 * and then periodically aggregate:
114 * tNONIDLE = sum(tNONIDLE[i])
116 * tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
117 * tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
119 * %SOME = tSOME / period
120 * %FULL = tFULL / period
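* For example, over a 2s period on two CPUs, where CPU0 spent its
* entire 2s of non-idle time with a delayed task and CPU1 was non-idle
* for 2s without any delays:
*
* tNONIDLE = 2s + 2s = 4s
* tSOME = (2s*2s + 0*2s) / 4s = 1s
* %SOME = 1s / 2s = 50%
*
* A fully idle CPU, by contrast, would contribute no weight at all.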
122 * This gives us an approximation of pressure that is practical
123 * cost-wise, yet way more sensitive and accurate than periodic
124 * sampling of the aggregate task states would be.
125 */
127 #include <linux/sched/loadavg.h>
128 #include <linux/seq_file.h>
129 #include <linux/proc_fs.h>
130 #include <linux/seqlock.h>
131 #include <linux/cgroup.h>
132 #include <linux/module.h>
133 #include <linux/sched.h>
134 #include <linux/psi.h>
135 #include "sched.h"
137 static int psi_bug __read_mostly;
139 DEFINE_STATIC_KEY_FALSE(psi_disabled);
141 #ifdef CONFIG_PSI_DEFAULT_DISABLED
142 static bool psi_enable;
143 #else
144 static bool psi_enable = true;
145 #endif
146 static int __init setup_psi(char *str)
147 {
148 return kstrtobool(str, &psi_enable) == 0;
149 }
150 __setup("psi=", setup_psi);
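/*
 * E.g. booting with "psi=1" re-enables the accounting when
 * CONFIG_PSI_DEFAULT_DISABLED is set; "psi=0" turns it off otherwise.
 */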
152 /* Running averages - we need to be higher-res than loadavg */
153 #define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
154 #define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */
155 #define EXP_60s 1981 /* 1/exp(2s/60s) */
156 #define EXP_300s 2034 /* 1/exp(2s/300s) */
158 /* Sampling frequency in nanoseconds */
159 static u64 psi_period __read_mostly;
161 /* System-level pressure and stall tracking */
162 static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
163 static struct psi_group psi_system = {
164 .pcpu = &system_group_pcpu,
167 static void psi_update_work(struct work_struct *work);
169 static void group_init(struct psi_group *group)
173 for_each_possible_cpu(cpu)
174 seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
175 group->next_update = sched_clock() + psi_period;
176 INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
177 mutex_init(&group->stat_lock);
180 void __init psi_init(void)
181 {
182 if (!psi_enable) {
183 static_branch_enable(&psi_disabled);
184 return;
185 }
187 psi_period = jiffies_to_nsecs(PSI_FREQ);
188 group_init(&psi_system);
191 static bool test_state(unsigned int *tasks, enum psi_states state)
192 {
193 switch (state) {
194 case PSI_IO_SOME:
195 return tasks[NR_IOWAIT];
196 case PSI_IO_FULL:
197 return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
198 case PSI_MEM_SOME:
199 return tasks[NR_MEMSTALL];
200 case PSI_MEM_FULL:
201 return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
202 case PSI_CPU_SOME:
203 return tasks[NR_RUNNING] > 1;
204 case PSI_NONIDLE:
205 return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
206 tasks[NR_RUNNING];
207 default:
208 return false;
209 }
210 }
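/*
 * For example, a CPU whose only non-idle task is blocked on IO
 * (nr_iowait == 1, nr_memstall == 0, nr_running == 0) tests true for
 * PSI_IO_SOME, PSI_IO_FULL and PSI_NONIDLE, and false for the memory
 * and CPU states.
 */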
212 static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
214 struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
215 unsigned int tasks[NR_PSI_TASK_COUNTS];
216 u64 now, state_start;
220 /* Snapshot a coherent view of the CPU state */
222 seq = read_seqcount_begin(&groupc->seq);
223 now = cpu_clock(cpu);
224 memcpy(times, groupc->times, sizeof(groupc->times));
225 memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
226 state_start = groupc->state_start;
227 } while (read_seqcount_retry(&groupc->seq, seq));
229 /* Calculate state time deltas against the previous snapshot */
230 for (s = 0; s < NR_PSI_STATES; s++) {
233 * In addition to already concluded states, we also
234 * incorporate currently active states on the CPU,
235 * since states may last for many sampling periods.
237 * This way we keep our delta sampling buckets small
238 * (u32) and our reported pressure close to what's
239 * actually happening.
241 if (test_state(tasks, s))
242 times[s] += now - state_start;
244 delta = times[s] - groupc->times_prev[s];
245 groupc->times_prev[s] = times[s];
247 times[s] = delta;
248 }
249 }
251 static void calc_avgs(unsigned long avg[3], int missed_periods,
252 u64 time, u64 period)
256 /* Fill in zeroes for periods of no activity */
257 if (missed_periods) {
258 avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
259 avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
260 avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
263 /* Sample the most recent active period */
264 pct = div_u64(time * 100, period);
265 pct *= FIXED_1;
266 avg[0] = calc_load(avg[0], EXP_10s, pct);
267 avg[1] = calc_load(avg[1], EXP_60s, pct);
268 avg[2] = calc_load(avg[2], EXP_300s, pct);
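/*
 * calc_load() (see <linux/sched/loadavg.h>) folds each new sample into
 * the running average as, roughly:
 *
 *	avg = (avg * EXP_Ns + pct * (FIXED_1 - EXP_Ns)) / FIXED_1
 *
 * which is why pct is scaled up to fixed-point above before folding.
 */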
271 static bool update_stats(struct psi_group *group)
273 u64 deltas[NR_PSI_STATES - 1] = { 0, };
274 unsigned long missed_periods = 0;
275 unsigned long nonidle_total = 0;
276 u64 now, expires, period;
280 mutex_lock(&group->stat_lock);
283 * Collect the per-cpu time buckets and average them into a
284 * single time sample that is normalized to wallclock time.
286 * For averaging, each CPU is weighted by its non-idle time in
287 * the sampling period. This eliminates artifacts from uneven
288 * loading, or even entirely idle CPUs.
290 for_each_possible_cpu(cpu) {
291 u32 times[NR_PSI_STATES];
294 get_recent_times(group, cpu, times);
296 nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
297 nonidle_total += nonidle;
299 for (s = 0; s < PSI_NONIDLE; s++)
300 deltas[s] += (u64)times[s] * nonidle;
304 * Integrate the sample into the running statistics that are
305 * reported to userspace: the cumulative stall times and the
306 * decaying averages.
308 * Pressure percentages are sampled at PSI_FREQ. We might be
309 * called more often when the user polls more frequently than
310 * that; we might be called less often when there is no task
311 * activity, thus no data, and clock ticks are sporadic. The
312 * below handles both.
316 for (s = 0; s < NR_PSI_STATES - 1; s++)
317 group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));
320 now = sched_clock();
321 expires = group->next_update;
322 if (now < expires)
323 goto out;
324 if (now - expires > psi_period)
325 missed_periods = div_u64(now - expires, psi_period);
328 * The periodic clock tick can get delayed for various
329 * reasons, especially on loaded systems. To avoid clock
330 * drift, we schedule the clock in fixed psi_period intervals.
331 * But the deltas we sample out of the per-cpu buckets above
332 * are based on the actual time elapsing between clock ticks.
334 group->next_update = expires + ((1 + missed_periods) * psi_period);
335 period = now - (group->last_update + (missed_periods * psi_period));
336 group->last_update = now;
338 for (s = 0; s < NR_PSI_STATES - 1; s++) {
341 sample = group->total[s] - group->total_prev[s];
343 * Due to the lockless sampling of the time buckets,
344 * recorded time deltas can slip into the next period,
345 * which under full pressure can result in samples in
346 * excess of the period length.
348 * We don't want to report nonsensical pressures in
349 * excess of 100%, nor do we want to drop such events
350 * on the floor. Instead we punt any overage into the
351 * future until pressure subsides. By doing this we
352 * don't underreport the occurring pressure curve, we
353 * just report it delayed by one period length.
355 * The error isn't cumulative. As soon as another
356 * delta slips from a period P to P+1, by definition
357 * it frees up its time T in P.
358 */
359 if (sample > period)
360 sample = period;
361 group->total_prev[s] += sample;
362 calc_avgs(group->avg[s], missed_periods, sample, period);
363 }
364 out:
365 mutex_unlock(&group->stat_lock);
366 return nonidle_total;
369 static void psi_update_work(struct work_struct *work)
371 struct delayed_work *dwork;
372 struct psi_group *group;
375 dwork = to_delayed_work(work);
376 group = container_of(dwork, struct psi_group, clock_work);
379 * If there is task activity, periodically fold the per-cpu
380 * times and feed samples into the running averages. If things
381 * are idle and there is no data to process, stop the clock.
382 * Once restarted, we'll catch up the running averages in one
383 * go - see calc_avgs() and missed_periods.
386 nonidle = update_stats(group);
388 if (nonidle) {
389 unsigned long delay = 0;
390 u64 now;
392 now = sched_clock();
393 if (group->next_update > now)
394 delay = nsecs_to_jiffies(group->next_update - now) + 1;
395 schedule_delayed_work(dwork, delay);
396 }
397 }
399 static void record_times(struct psi_group_cpu *groupc, int cpu,
400 bool memstall_tick)
405 now = cpu_clock(cpu);
406 delta = now - groupc->state_start;
407 groupc->state_start = now;
409 if (test_state(groupc->tasks, PSI_IO_SOME)) {
410 groupc->times[PSI_IO_SOME] += delta;
411 if (test_state(groupc->tasks, PSI_IO_FULL))
412 groupc->times[PSI_IO_FULL] += delta;
415 if (test_state(groupc->tasks, PSI_MEM_SOME)) {
416 groupc->times[PSI_MEM_SOME] += delta;
417 if (test_state(groupc->tasks, PSI_MEM_FULL))
418 groupc->times[PSI_MEM_FULL] += delta;
419 else if (memstall_tick) {
422 * Since we care about lost potential, a
423 * memstall is FULL when there are no other
424 * working tasks, but also when the CPU is
425 * actively reclaiming and nothing productive
426 * could run even if it were runnable.
428 * When the timer tick sees a reclaiming CPU,
429 * regardless of runnable tasks, sample a FULL
430 * tick (or less if it hasn't been a full tick
431 * since the last state change).
433 sample = min(delta, (u32)jiffies_to_nsecs(1));
434 groupc->times[PSI_MEM_FULL] += sample;
438 if (test_state(groupc->tasks, PSI_CPU_SOME))
439 groupc->times[PSI_CPU_SOME] += delta;
441 if (test_state(groupc->tasks, PSI_NONIDLE))
442 groupc->times[PSI_NONIDLE] += delta;
445 static void psi_group_change(struct psi_group *group, int cpu,
446 unsigned int clear, unsigned int set)
448 struct psi_group_cpu *groupc;
451 groupc = per_cpu_ptr(group->pcpu, cpu);
454 * First we assess the aggregate resource states this CPU's
455 * tasks have been in since the last change, and account any
456 * SOME and FULL time these may have resulted in.
458 * Then we update the task counts according to the state
459 * change requested through the @clear and @set bits.
461 write_seqcount_begin(&groupc->seq);
463 record_times(groupc, cpu, false);
465 for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
466 if (!(m & (1 << t)))
467 continue;
468 if (groupc->tasks[t] == 0 && !psi_bug) {
469 printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
470 cpu, t, groupc->tasks[0],
471 groupc->tasks[1], groupc->tasks[2],
472 clear, set);
473 psi_bug = 1;
474 }
475 groupc->tasks[t]--;
476 }
478 for (t = 0; set; set &= ~(1 << t), t++)
479 if (set & (1 << t))
480 groupc->tasks[t]++;
482 write_seqcount_end(&groupc->seq);
484 if (!delayed_work_pending(&group->clock_work))
485 schedule_delayed_work(&group->clock_work, PSI_FREQ);
488 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
490 #ifdef CONFIG_CGROUPS
491 struct cgroup *cgroup = NULL;
493 if (!*iter)
494 cgroup = task->cgroups->dfl_cgrp;
495 else if (*iter == &psi_system)
496 return NULL;
497 else
498 cgroup = cgroup_parent(*iter);
500 if (cgroup && cgroup_parent(cgroup)) {
501 *iter = cgroup;
502 return cgroup_psi(cgroup);
503 }
504 #else
505 if (*iter)
506 return NULL;
507 #endif
508 *iter = &psi_system;
509 return &psi_system;
510 }
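/*
 * A task's stall state is thus accounted to every group it belongs to:
 * its own cgroup and all ancestors below the root, followed by the
 * system-wide psi_system group on the final iteration.
 */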
512 void psi_task_change(struct task_struct *task, int clear, int set)
514 int cpu = task_cpu(task);
515 struct psi_group *group;
521 if (((task->psi_flags & set) ||
522 (task->psi_flags & clear) != clear) &&
523 !psi_bug) {
524 printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
525 task->pid, task->comm, cpu,
526 task->psi_flags, clear, set);
527 psi_bug = 1;
528 }
530 task->psi_flags &= ~clear;
531 task->psi_flags |= set;
533 while ((group = iterate_groups(task, &iter)))
534 psi_group_change(group, cpu, clear, set);
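/*
 * The scheduler-side callers (psi_enqueue(), psi_dequeue() and
 * psi_ttwu_dequeue() in kernel/sched/stats.h) translate task events
 * into @clear/@set pairs; e.g. waking a task that was sleeping in
 * iowait amounts to psi_task_change(p, TSK_IOWAIT, TSK_RUNNING).
 */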
537 void psi_memstall_tick(struct task_struct *task, int cpu)
539 struct psi_group *group;
542 while ((group = iterate_groups(task, &iter))) {
543 struct psi_group_cpu *groupc;
545 groupc = per_cpu_ptr(group->pcpu, cpu);
546 write_seqcount_begin(&groupc->seq);
547 record_times(groupc, cpu, true);
548 write_seqcount_end(&groupc->seq);
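/*
 * This is driven from the scheduler tick (psi_task_tick() in
 * kernel/sched/stats.h) whenever the CPU's current task has
 * PF_MEMSTALL set, so that time spent in reclaim is sampled as FULL
 * even while other tasks remain runnable - see record_times() above.
 */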
553 * psi_memstall_enter - mark the beginning of a memory stall section
554 * @flags: flags to handle nested sections
556 * Marks the calling task as being stalled due to a lack of memory,
557 * such as waiting for a refault or performing reclaim.
559 void psi_memstall_enter(unsigned long *flags)
564 if (static_branch_likely(&psi_disabled))
565 return;
567 *flags = current->flags & PF_MEMSTALL;
568 if (*flags)
569 return;
571 * PF_MEMSTALL setting & accounting needs to be atomic wrt
572 * changes to the task's scheduling state, otherwise we can
573 * race with CPU migration.
575 rq = this_rq_lock_irq(&rf);
577 current->flags |= PF_MEMSTALL;
578 psi_task_change(current, 0, TSK_MEMSTALL);
580 rq_unlock_irq(rq, &rf);
584 * psi_memstall_leave - mark the end of a memory stall section
585 * @flags: flags to handle nested memdelay sections
587 * Marks the calling task as no longer stalled due to lack of memory.
589 void psi_memstall_leave(unsigned long *flags)
594 if (static_branch_likely(&psi_disabled))
595 return;
597 if (*flags)
598 return;
600 * PF_MEMSTALL clearing & accounting needs to be atomic wrt
601 * changes to the task's scheduling state, otherwise we could
602 * race with CPU migration.
604 rq = this_rq_lock_irq(&rf);
606 current->flags &= ~PF_MEMSTALL;
607 psi_task_change(current, TSK_MEMSTALL, 0);
609 rq_unlock_irq(rq, &rf);
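/*
 * A minimal usage sketch: callers such as the reclaim and refault
 * paths are expected to bracket their stalls with the pair above. The
 * function name and body below are illustrative only.
 */
static void __maybe_unused psi_memstall_example(void)
{
	unsigned long pflags;

	psi_memstall_enter(&pflags);
	/* ... perform reclaim or wait for a refaulting page ... */
	psi_memstall_leave(&pflags);
}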
612 #ifdef CONFIG_CGROUPS
613 int psi_cgroup_alloc(struct cgroup *cgroup)
615 if (static_branch_likely(&psi_disabled))
616 return 0;
618 cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
619 if (!cgroup->psi.pcpu)
620 return -ENOMEM;
621 group_init(&cgroup->psi);
622 return 0;
623 }
625 void psi_cgroup_free(struct cgroup *cgroup)
627 if (static_branch_likely(&psi_disabled))
628 return;
630 cancel_delayed_work_sync(&cgroup->psi.clock_work);
631 free_percpu(cgroup->psi.pcpu);
635 * cgroup_move_task - move task to a different cgroup
636 * @task: the task
637 * @to: the target css_set
639 * Move task to a new cgroup and safely migrate its associated stall
640 * state between the different groups.
642 * This function acquires the task's rq lock to lock out concurrent
643 * changes to the task's scheduling state and - in case the task is
644 * running - concurrent changes to its stall state.
646 void cgroup_move_task(struct task_struct *task, struct css_set *to)
648 unsigned int task_flags = 0;
652 if (static_branch_likely(&psi_disabled)) {
654 * Lame to do this here, but the scheduler cannot be locked
655 * from the outside, so we move cgroups from inside sched/.
657 rcu_assign_pointer(task->cgroups, to);
658 return;
659 }
661 rq = task_rq_lock(task, &rf);
663 if (task_on_rq_queued(task))
664 task_flags = TSK_RUNNING;
665 else if (task->in_iowait)
666 task_flags = TSK_IOWAIT;
668 if (task->flags & PF_MEMSTALL)
669 task_flags |= TSK_MEMSTALL;
671 if (task_flags)
672 psi_task_change(task, task_flags, 0);
674 /* See comment above */
675 rcu_assign_pointer(task->cgroups, to);
677 if (task_flags)
678 psi_task_change(task, 0, task_flags);
680 task_rq_unlock(rq, task, &rf);
682 #endif /* CONFIG_CGROUPS */
684 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
688 if (static_branch_likely(&psi_disabled))
689 return -EOPNOTSUPP;
691 update_stats(group);
693 for (full = 0; full < 2 - (res == PSI_CPU); full++) {
694 unsigned long avg[3];
698 for (w = 0; w < 3; w++)
699 avg[w] = group->avg[res * 2 + full][w];
700 total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);
702 seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
703 full ? "full" : "some",
704 LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
705 LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
706 LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
707 total);
708 }
710 return 0;
711 }
713 static int psi_io_show(struct seq_file *m, void *v)
715 return psi_show(m, &psi_system, PSI_IO);
718 static int psi_memory_show(struct seq_file *m, void *v)
720 return psi_show(m, &psi_system, PSI_MEM);
723 static int psi_cpu_show(struct seq_file *m, void *v)
725 return psi_show(m, &psi_system, PSI_CPU);
728 static int psi_io_open(struct inode *inode, struct file *file)
730 return single_open(file, psi_io_show, NULL);
733 static int psi_memory_open(struct inode *inode, struct file *file)
735 return single_open(file, psi_memory_show, NULL);
738 static int psi_cpu_open(struct inode *inode, struct file *file)
740 return single_open(file, psi_cpu_show, NULL);
743 static const struct file_operations psi_io_fops = {
744 .open = psi_io_open,
745 .read = seq_read,
746 .llseek = seq_lseek,
747 .release = single_release,
750 static const struct file_operations psi_memory_fops = {
751 .open = psi_memory_open,
752 .read = seq_read,
753 .llseek = seq_lseek,
754 .release = single_release,
757 static const struct file_operations psi_cpu_fops = {
758 .open = psi_cpu_open,
759 .read = seq_read,
760 .llseek = seq_lseek,
761 .release = single_release,
764 static int __init psi_proc_init(void)
766 proc_mkdir("pressure", NULL);
767 proc_create("pressure/io", 0, NULL, &psi_io_fops);
768 proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
769 proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
772 module_init(psi_proc_init);
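/*
 * The resulting files look like this (values are illustrative):
 *
 *	# cat /proc/pressure/memory
 *	some avg10=2.04 avg60=0.75 avg300=0.40 total=157622151
 *	full avg10=1.02 avg60=0.31 avg300=0.12 total=82113840
 *
 * /proc/pressure/cpu reports only the "some" line, since FULL is not
 * defined for the CPU resource.
 */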