/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * (Naturally, the FULL state doesn't exist for the CPU resource.)
 *
 *      SOME = nr_delayed_tasks != 0
 *      FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *      %SOME = time(SOME) / period
 *      %FULL = time(FULL) / period
 *
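 * For example, if at least one task was delayed on a resource for a
 * total of 300ms inside a 2 second window, that window's %SOME for
 * the resource is 0.3s / 2s = 15%. (Illustrative numbers, not taken
 * from the code below.)
 *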
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *      threads = min(nr_nonidle_tasks, nr_cpus)
 *         SOME = min(nr_delayed_tasks / threads, 1)
 *         FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *      threads = min(257, 256)
 *         SOME = min(1 / 256, 1)             = 0.4%
 *         FULL = (256 - min(257, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *      threads = min(4, 4)
 *         SOME = min(1 / 4, 1)               = 25%
 *         FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *         tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *         tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *      tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *      tNONIDLE = sum(tNONIDLE[i])
 *
 *         tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *         tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *         %SOME = tSOME / period
 *         %FULL = tFULL / period
 *
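 * As a sketch with made-up numbers: if over one period CPU0 was
 * non-idle for 1.0s, 0.5s of which in SOME, and CPU1 was non-idle
 * for 0.5s, 0.1s of which in SOME, then
 *
 *      tNONIDLE = 1.0 + 0.5 = 1.5
 *         tSOME = (0.5 * 1.0 + 0.1 * 0.5) / 1.5 ~= 0.37
 *
 * i.e. the busier CPU dominates the aggregate, as intended.
 *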
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */

#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
bool psi_enable;
#else
bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
        return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ  (2*HZ+1)      /* 2 sec intervals */
#define EXP_10s   1677          /* 1/exp(2s/10s) as fixed-point */
#define EXP_60s   1981          /* 1/exp(2s/60s) */
#define EXP_300s  2034          /* 1/exp(2s/300s) */
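
/*
 * The EXP_* constants above are e^(-interval/window) in FIXED_1
 * (1 << 11 == 2048) fixed-point, as consumed by calc_load(). A quick
 * sanity check of the arithmetic:
 *
 *      EXP_10s  ~= 2048 * e^(-2/10)  ~= 2048 * 0.8187 ~= 1677
 *      EXP_60s  ~= 2048 * e^(-2/60)  ~= 2048 * 0.9672 ~= 1981
 *      EXP_300s ~= 2048 * e^(-2/300) ~= 2048 * 0.9934 ~= 2034
 */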

/* Sampling interval in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
static struct psi_group psi_system = {
        .pcpu = &system_group_pcpu,
};

static void psi_update_work(struct work_struct *work);

static void group_init(struct psi_group *group)
{
        int cpu;

        for_each_possible_cpu(cpu)
                seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
        group->next_update = sched_clock() + psi_period;
        INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
        mutex_init(&group->stat_lock);
}

void __init psi_init(void)
{
        if (!psi_enable) {
                static_branch_enable(&psi_disabled);
                return;
        }

        psi_period = jiffies_to_nsecs(PSI_FREQ);
        group_init(&psi_system);
}

static bool test_state(unsigned int *tasks, enum psi_states state)
{
        switch (state) {
        case PSI_IO_SOME:
                return tasks[NR_IOWAIT];
        case PSI_IO_FULL:
                return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
        case PSI_MEM_SOME:
                return tasks[NR_MEMSTALL];
        case PSI_MEM_FULL:
                return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
        case PSI_CPU_SOME:
                return tasks[NR_RUNNING] > 1;
        case PSI_NONIDLE:
                return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
                        tasks[NR_RUNNING];
        default:
                return false;
        }
}

static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
{
        struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
        unsigned int tasks[NR_PSI_TASK_COUNTS];
        u64 now, state_start;
        unsigned int seq;
        int s;

        /* Snapshot a coherent view of the CPU state */
        do {
                seq = read_seqcount_begin(&groupc->seq);
                now = cpu_clock(cpu);
                memcpy(times, groupc->times, sizeof(groupc->times));
                memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
                state_start = groupc->state_start;
        } while (read_seqcount_retry(&groupc->seq, seq));

        /* Calculate state time deltas against the previous snapshot */
        for (s = 0; s < NR_PSI_STATES; s++) {
                u32 delta;
                /*
                 * In addition to already concluded states, we also
                 * incorporate currently active states on the CPU,
                 * since states may last for many sampling periods.
                 *
                 * This way we keep our delta sampling buckets small
                 * (u32) and our reported pressure close to what's
                 * actually happening.
                 */
                if (test_state(tasks, s))
                        times[s] += now - state_start;

                delta = times[s] - groupc->times_prev[s];
                groupc->times_prev[s] = times[s];

                times[s] = delta;
        }
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
                      u64 time, u64 period)
{
        unsigned long pct;

        /* Fill in zeroes for periods of no activity */
        if (missed_periods) {
                avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
                avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
                avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
        }

        /* Sample the most recent active period */
        pct = div_u64(time * 100, period);
        pct *= FIXED_1;
        avg[0] = calc_load(avg[0], EXP_10s, pct);
        avg[1] = calc_load(avg[1], EXP_60s, pct);
        avg[2] = calc_load(avg[2], EXP_300s, pct);
}

static bool update_stats(struct psi_group *group)
{
        u64 deltas[NR_PSI_STATES - 1] = { 0, };
        unsigned long missed_periods = 0;
        unsigned long nonidle_total = 0;
        u64 now, expires, period;
        int cpu;
        int s;

        mutex_lock(&group->stat_lock);

        /*
         * Collect the per-cpu time buckets and average them into a
         * single time sample that is normalized to wallclock time.
         *
         * For averaging, each CPU is weighted by its non-idle time in
         * the sampling period. This eliminates artifacts from uneven
         * loading, or even entirely idle CPUs.
         */
        for_each_possible_cpu(cpu) {
                u32 times[NR_PSI_STATES];
                u32 nonidle;

                get_recent_times(group, cpu, times);

                nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
                nonidle_total += nonidle;

                for (s = 0; s < PSI_NONIDLE; s++)
                        deltas[s] += (u64)times[s] * nonidle;
        }

        /*
         * Integrate the sample into the running statistics that are
         * reported to userspace: the cumulative stall times and the
         * decaying averages.
         *
         * Pressure percentages are sampled at PSI_FREQ. We might be
         * called more often when the user polls more frequently than
         * that; we might be called less often when there is no task
         * activity, thus no data, and clock ticks are sporadic. The
         * below handles both.
         */

        /* total= */
        for (s = 0; s < NR_PSI_STATES - 1; s++)
                group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));

        /* avgX= */
        now = sched_clock();
        expires = group->next_update;
        if (now < expires)
                goto out;
        if (now - expires >= psi_period)
                missed_periods = div_u64(now - expires, psi_period);

        /*
         * The periodic clock tick can get delayed for various
         * reasons, especially on loaded systems. To avoid clock
         * drift, we schedule the clock in fixed psi_period intervals.
         * But the deltas we sample out of the per-cpu buckets above
         * are based on the actual time elapsing between clock ticks.
         */
        group->next_update = expires + ((1 + missed_periods) * psi_period);
        period = now - (group->last_update + (missed_periods * psi_period));
        group->last_update = now;

        for (s = 0; s < NR_PSI_STATES - 1; s++) {
                u32 sample;

                sample = group->total[s] - group->total_prev[s];
                /*
                 * Due to the lockless sampling of the time buckets,
                 * recorded time deltas can slip into the next period,
                 * which under full pressure can result in samples in
                 * excess of the period length.
                 *
                 * We don't want to report nonsensical pressures in
                 * excess of 100%, nor do we want to drop such events
                 * on the floor. Instead we punt any overage into the
                 * future until pressure subsides. By doing this we
                 * don't underreport the occurring pressure curve, we
                 * just report it delayed by one period length.
                 *
                 * The error isn't cumulative. As soon as another
                 * delta slips from a period P to P+1, by definition
                 * it frees up its time T in P.
                 */
                if (sample > period)
                        sample = period;
                group->total_prev[s] += sample;
                calc_avgs(group->avg[s], missed_periods, sample, period);
        }
out:
        mutex_unlock(&group->stat_lock);
        return nonidle_total;
}

static void psi_update_work(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct psi_group *group;
        bool nonidle;

        dwork = to_delayed_work(work);
        group = container_of(dwork, struct psi_group, clock_work);

        /*
         * If there is task activity, periodically fold the per-cpu
         * times and feed samples into the running averages. If things
         * are idle and there is no data to process, stop the clock.
         * Once restarted, we'll catch up the running averages in one
         * go - see calc_avgs() and missed_periods.
         */

        nonidle = update_stats(group);

        if (nonidle) {
                unsigned long delay = 0;
                u64 now;

                now = sched_clock();
                if (group->next_update > now)
                        delay = nsecs_to_jiffies(group->next_update - now) + 1;
                schedule_delayed_work(dwork, delay);
        }
}

static void record_times(struct psi_group_cpu *groupc, int cpu,
                         bool memstall_tick)
{
        u32 delta;
        u64 now;

        now = cpu_clock(cpu);
        delta = now - groupc->state_start;
        groupc->state_start = now;

        if (test_state(groupc->tasks, PSI_IO_SOME)) {
                groupc->times[PSI_IO_SOME] += delta;
                if (test_state(groupc->tasks, PSI_IO_FULL))
                        groupc->times[PSI_IO_FULL] += delta;
        }

        if (test_state(groupc->tasks, PSI_MEM_SOME)) {
                groupc->times[PSI_MEM_SOME] += delta;
                if (test_state(groupc->tasks, PSI_MEM_FULL))
                        groupc->times[PSI_MEM_FULL] += delta;
                else if (memstall_tick) {
                        u32 sample;
                        /*
                         * Since we care about lost potential, a
                         * memstall is FULL when there are no other
                         * working tasks, but also when the CPU is
                         * actively reclaiming and nothing productive
                         * could run even if it were runnable.
                         *
                         * When the timer tick sees a reclaiming CPU,
                         * regardless of runnable tasks, sample a FULL
                         * tick (or less if it hasn't been a full tick
                         * since the last state change).
                         */
                        sample = min(delta, (u32)jiffies_to_nsecs(1));
                        groupc->times[PSI_MEM_FULL] += sample;
                }
        }

        if (test_state(groupc->tasks, PSI_CPU_SOME))
                groupc->times[PSI_CPU_SOME] += delta;

        if (test_state(groupc->tasks, PSI_NONIDLE))
                groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
                             unsigned int clear, unsigned int set)
{
        struct psi_group_cpu *groupc;
        unsigned int t, m;

        groupc = per_cpu_ptr(group->pcpu, cpu);

        /*
         * First we assess the aggregate resource states this CPU's
         * tasks have been in since the last change, and account any
         * SOME and FULL time these may have resulted in.
         *
         * Then we update the task counts according to the state
         * change requested through the @clear and @set bits.
         */
        write_seqcount_begin(&groupc->seq);

        record_times(groupc, cpu, false);

        for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
                if (!(m & (1 << t)))
                        continue;
                if (groupc->tasks[t] == 0 && !psi_bug) {
                        printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
                                        cpu, t, groupc->tasks[0],
                                        groupc->tasks[1], groupc->tasks[2],
                                        clear, set);
                        psi_bug = 1;
                }
                groupc->tasks[t]--;
        }

        for (t = 0; set; set &= ~(1 << t), t++)
                if (set & (1 << t))
                        groupc->tasks[t]++;

        write_seqcount_end(&groupc->seq);
}

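/*
 * Walk the psi groups that cover @task: its own cgroup on the
 * default hierarchy and every ancestor short of the root cgroup
 * (whose state is the system-wide state), finishing with the
 * psi_system group.
 */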
static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
        struct cgroup *cgroup = NULL;

        if (!*iter)
                cgroup = task->cgroups->dfl_cgrp;
        else if (*iter == &psi_system)
                return NULL;
        else
                cgroup = cgroup_parent(*iter);

        if (cgroup && cgroup_parent(cgroup)) {
                *iter = cgroup;
                return cgroup_psi(cgroup);
        }
#else
        if (*iter)
                return NULL;
#endif
        *iter = &psi_system;
        return &psi_system;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
        int cpu = task_cpu(task);
        struct psi_group *group;
        bool wake_clock = true;
        void *iter = NULL;

        if (!task->pid)
                return;

        if (((task->psi_flags & set) ||
             (task->psi_flags & clear) != clear) &&
            !psi_bug) {
                printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
                                task->pid, task->comm, cpu,
                                task->psi_flags, clear, set);
                psi_bug = 1;
        }

        task->psi_flags &= ~clear;
        task->psi_flags |= set;

        /*
         * Periodic aggregation shuts off if there is a period of no
         * task changes, so we wake it back up if necessary. However,
         * don't do this if the task change is the aggregation worker
         * itself going to sleep, or we'll ping-pong forever.
         */
        if (unlikely((clear & TSK_RUNNING) &&
                     (task->flags & PF_WQ_WORKER) &&
                     wq_worker_last_func(task) == psi_update_work))
                wake_clock = false;

        while ((group = iterate_groups(task, &iter))) {
                psi_group_change(group, cpu, clear, set);
                if (wake_clock && !delayed_work_pending(&group->clock_work))
                        schedule_delayed_work(&group->clock_work, PSI_FREQ);
        }
}

void psi_memstall_tick(struct task_struct *task, int cpu)
{
        struct psi_group *group;
        void *iter = NULL;

        while ((group = iterate_groups(task, &iter))) {
                struct psi_group_cpu *groupc;

                groupc = per_cpu_ptr(group->pcpu, cpu);
                write_seqcount_begin(&groupc->seq);
                record_times(groupc, cpu, true);
                write_seqcount_end(&groupc->seq);
        }
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        *flags = current->flags & PF_MEMSTALL;
        if (*flags)
                return;
        /*
         * PF_MEMSTALL setting & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we can
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->flags |= PF_MEMSTALL;
        psi_task_change(current, 0, TSK_MEMSTALL);

        rq_unlock_irq(rq, &rf);
}

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        if (*flags)
                return;
        /*
         * PF_MEMSTALL clearing & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we could
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->flags &= ~PF_MEMSTALL;
        psi_task_change(current, TSK_MEMSTALL, 0);

        rq_unlock_irq(rq, &rf);
}
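
/*
 * A usage sketch (not part of this file): callers bracket code in
 * which they may stall on memory, e.g. direct reclaim or waiting on
 * a refaulting page. do_reclaim_or_refault_wait() is a hypothetical
 * placeholder:
 *
 *      unsigned long pflags;
 *
 *      psi_memstall_enter(&pflags);
 *      do_reclaim_or_refault_wait();
 *      psi_memstall_leave(&pflags);
 *
 * The saved flags let nested sections enter and leave without
 * double-accounting the stall time.
 */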

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return 0;

        cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
        if (!cgroup->psi.pcpu)
                return -ENOMEM;
        group_init(&cgroup->psi);
        return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return;

        cancel_delayed_work_sync(&cgroup->psi.clock_work);
        free_percpu(cgroup->psi.pcpu);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
        unsigned int task_flags = 0;
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled)) {
                /*
                 * Lame to do this here, but the scheduler cannot be locked
                 * from the outside, so we move cgroups from inside sched/.
                 */
                rcu_assign_pointer(task->cgroups, to);
                return;
        }

        rq = task_rq_lock(task, &rf);

        if (task_on_rq_queued(task))
                task_flags = TSK_RUNNING;
        else if (task->in_iowait)
                task_flags = TSK_IOWAIT;

        if (task->flags & PF_MEMSTALL)
                task_flags |= TSK_MEMSTALL;

        if (task_flags)
                psi_task_change(task, task_flags, 0);

        /* See comment above */
        rcu_assign_pointer(task->cgroups, to);

        if (task_flags)
                psi_task_change(task, 0, task_flags);

        task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
        int full;

        if (static_branch_likely(&psi_disabled))
                return -EOPNOTSUPP;

        update_stats(group);

        for (full = 0; full < 2 - (res == PSI_CPU); full++) {
                unsigned long avg[3];
                u64 total;
                int w;

                for (w = 0; w < 3; w++)
                        avg[w] = group->avg[res * 2 + full][w];
                total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);

                seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
                           full ? "full" : "some",
                           LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
                           LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
                           LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
                           total);
        }

        return 0;
}
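
/*
 * Given the format string above, reading e.g. /proc/pressure/memory
 * produces lines like the following (values purely illustrative):
 *
 *      some avg10=0.32 avg60=0.12 avg300=0.02 total=190722
 *      full avg10=0.16 avg60=0.05 avg300=0.01 total=74521
 */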

static int psi_io_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
        return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
        return single_open(file, psi_cpu_show, NULL);
}

static const struct file_operations psi_io_fops = {
        .open = psi_io_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static const struct file_operations psi_memory_fops = {
        .open = psi_memory_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static const struct file_operations psi_cpu_fops = {
        .open = psi_cpu_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init psi_proc_init(void)
{
        proc_mkdir("pressure", NULL);
        proc_create("pressure/io", 0, NULL, &psi_io_fops);
        proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
        proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
        return 0;
}
module_init(psi_proc_init);