Commit | Line | Data |
---|---|---|
eb414681 JW |
1 | /* |
2 | * Pressure stall information for CPU, memory and IO | |
3 | * | |
4 | * Copyright (c) 2018 Facebook, Inc. | |
5 | * Author: Johannes Weiner <hannes@cmpxchg.org> | |
6 | * | |
0e94682b SB |
7 | * Polling support by Suren Baghdasaryan <surenb@google.com> |
8 | * Copyright (c) 2018 Google, Inc. | |
9 | * | |
eb414681 JW |
10 | * When CPU, memory and IO are contended, tasks experience delays that |
11 | * reduce throughput and introduce latencies into the workload. Memory | |
12 | * and IO contention, in addition, can cause a full loss of forward | |
13 | * progress in which the CPU goes idle. | |
14 | * | |
15 | * This code aggregates individual task delays into resource pressure | |
16 | * metrics that indicate problems with both workload health and | |
17 | * resource utilization. | |
18 | * | |
19 | * Model | |
20 | * | |
21 | * The time in which a task can execute on a CPU is our baseline for | |
22 | * productivity. Pressure expresses the amount of time in which this | |
23 | * potential cannot be realized due to resource contention. | |
24 | * | |
25 | * This concept of productivity has two components: the workload and | |
26 | * the CPU. To measure the impact of pressure on both, we define two | |
27 | * contention states for a resource: SOME and FULL. | |
28 | * | |
29 | * In the SOME state of a given resource, one or more tasks are | |
30 | * delayed on that resource. This affects the workload's ability to | |
31 | * perform work, but the CPU may still be executing other tasks. | |
32 | * | |
33 | * In the FULL state of a given resource, all non-idle tasks are | |
34 | * delayed on that resource such that nobody is advancing and the CPU | |
35 | * goes idle. This leaves both workload and CPU unproductive. | |
36 | * | |
e7fcd762 CZ |
37 | * Naturally, the FULL state doesn't exist for the CPU resource at the |
38 | * system level, but it does exist at the cgroup level: it means that all | 
39 | * non-idle tasks in a cgroup are delayed on the CPU resource, which is used | 
40 | * by others outside of the cgroup or throttled by the cgroup cpu.max configuration. | 
eb414681 JW |
41 | * |
42 | * SOME = nr_delayed_tasks != 0 | |
43 | * FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0 | |
44 | * | |
45 | * The percentage of wallclock time spent in those compound stall | |
46 | * states gives pressure numbers between 0 and 100 for each resource, | |
47 | * where the SOME percentage indicates workload slowdowns and the FULL | |
48 | * percentage indicates reduced CPU utilization: | |
49 | * | |
50 | * %SOME = time(SOME) / period | |
51 | * %FULL = time(FULL) / period | |
52 | * | |
53 | * Multiple CPUs | |
54 | * | |
55 | * The more tasks and available CPUs there are, the more work can be | |
56 | * performed concurrently. This means that the potential that can go | |
57 | * unrealized due to resource contention *also* scales with non-idle | |
58 | * tasks and CPUs. | |
59 | * | |
60 | * Consider a scenario where 257 number crunching tasks are trying to | |
61 | * run concurrently on 256 CPUs. If we simply aggregated the task | |
62 | * states, we would have to conclude a CPU SOME pressure number of | |
63 | * 100%, since *somebody* is waiting on a runqueue at all | |
64 | * times. However, that is clearly not the amount of contention the | |
3b03706f | 65 | * workload is experiencing: only one out of 256 possible execution |
eb414681 JW |
66 | * threads will be contended at any given time, or about 0.4%. |
67 | * | |
68 | * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any | |
69 | * given time *one* of the tasks is delayed due to a lack of memory. | |
70 | * Again, looking purely at the task state would yield a memory FULL | |
71 | * pressure number of 0%, since *somebody* is always making forward | |
72 | * progress. But again this wouldn't capture the amount of execution | |
73 | * potential lost, which is 1 out of 4 CPUs, or 25%. | |
74 | * | |
75 | * To calculate wasted potential (pressure) with multiple processors, | |
76 | * we have to base our calculation on the number of non-idle tasks in | |
77 | * conjunction with the number of available CPUs, which is the number | |
78 | * of potential execution threads. SOME becomes then the proportion of | |
3b03706f | 79 | * delayed tasks to possible threads, and FULL is the share of possible |
eb414681 JW |
80 | * threads that are unproductive due to delays: |
81 | * | |
82 | * threads = min(nr_nonidle_tasks, nr_cpus) | |
83 | * SOME = min(nr_delayed_tasks / threads, 1) | |
84 | * FULL = (threads - min(nr_running_tasks, threads)) / threads | |
85 | * | |
86 | * For the 257 number crunchers on 256 CPUs, this yields: | |
87 | * | |
88 | * threads = min(257, 256) | |
89 | * SOME = min(1 / 256, 1) = 0.4% | |
90 | * FULL = (256 - min(257, 256)) / 256 = 0% | |
91 | * | |
92 | * For the 1 out of 4 memory-delayed tasks, this yields: | |
93 | * | |
94 | * threads = min(4, 4) | |
95 | * SOME = min(1 / 4, 1) = 25% | |
96 | * FULL = (4 - min(3, 4)) / 4 = 25% | |
97 | * | |
98 | * [ Substitute nr_cpus with 1, and you can see that it's a natural | |
99 | * extension of the single-CPU model. ] | |
100 | * | |
101 | * Implementation | |
102 | * | |
103 | * To assess the precise time spent in each such state, we would have | |
104 | * to freeze the system on task changes and start/stop the state | |
105 | * clocks accordingly. Obviously that doesn't scale in practice. | |
106 | * | |
107 | * Because the scheduler aims to distribute the compute load evenly | |
108 | * among the available CPUs, we can track task state locally to each | |
109 | * CPU and, at much lower frequency, extrapolate the global state for | |
110 | * the cumulative stall times and the running averages. | |
111 | * | |
112 | * For each runqueue, we track: | |
113 | * | |
114 | * tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0) | |
115 | * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu]) | |
116 | * tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0) | |
117 | * | |
118 | * and then periodically aggregate: | |
119 | * | |
120 | * tNONIDLE = sum(tNONIDLE[i]) | |
121 | * | |
122 | * tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE | |
123 | * tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE | |
124 | * | |
125 | * %SOME = tSOME / period | |
126 | * %FULL = tFULL / period | |
127 | * | |
128 | * This gives us an approximation of pressure that is practical | |
129 | * cost-wise, yet way more sensitive and accurate than periodic | |
130 | * sampling of the aggregate task states would be. | |
131 | */ | |
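The worked figures in the comment above can be reproduced mechanically. Below is a minimal userspace sketch (illustrative only, not part of this file; all names are invented) that plugs the two example task populations into the multi-CPU SOME/FULL formulas:

```c
#include <stdio.h>

/* threads = min(nr_nonidle_tasks, nr_cpus) */
static double psi_some(unsigned nonidle, unsigned delayed, unsigned cpus)
{
	unsigned threads = nonidle < cpus ? nonidle : cpus;
	double some = (double)delayed / threads;

	return some > 1.0 ? 1.0 : some;	/* SOME = min(delayed / threads, 1) */
}

static double psi_full(unsigned nonidle, unsigned running, unsigned cpus)
{
	unsigned threads = nonidle < cpus ? nonidle : cpus;
	unsigned productive = running < threads ? running : threads;

	return (double)(threads - productive) / threads;
}

int main(void)
{
	/* 257 runnable number crunchers on 256 CPUs: somebody always waits */
	printf("SOME=%.1f%% FULL=%.1f%%\n",
	       100 * psi_some(257, 1, 256), 100 * psi_full(257, 257, 256));
	/* 4 tasks on 4 CPUs, one of them stuck in a memstall */
	printf("SOME=%.1f%% FULL=%.1f%%\n",
	       100 * psi_some(4, 1, 4), 100 * psi_full(4, 3, 4));
	return 0;
}
```

This prints 0.4%/0.0% for the first scenario and 25.0%/25.0% for the second, matching the examples in the comment.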
132 | ||
1b69ac6b | 133 | #include "../workqueue_internal.h" |
eb414681 JW |
134 | #include <linux/sched/loadavg.h> |
135 | #include <linux/seq_file.h> | |
136 | #include <linux/proc_fs.h> | |
137 | #include <linux/seqlock.h> | |
0e94682b | 138 | #include <linux/uaccess.h> |
eb414681 JW |
139 | #include <linux/cgroup.h> |
140 | #include <linux/module.h> | |
141 | #include <linux/sched.h> | |
0e94682b SB |
142 | #include <linux/ctype.h> |
143 | #include <linux/file.h> | |
144 | #include <linux/poll.h> | |
eb414681 JW |
145 | #include <linux/psi.h> |
146 | #include "sched.h" | |
147 | ||
148 | static int psi_bug __read_mostly; | |
149 | ||
e0c27447 JW |
150 | DEFINE_STATIC_KEY_FALSE(psi_disabled); |
151 | ||
152 | #ifdef CONFIG_PSI_DEFAULT_DISABLED | |
9289c5e6 | 153 | static bool psi_enable; |
e0c27447 | 154 | #else |
9289c5e6 | 155 | static bool psi_enable = true; |
e0c27447 JW |
156 | #endif |
157 | static int __init setup_psi(char *str) | |
158 | { | |
159 | return kstrtobool(str, &psi_enable) == 0; | |
160 | } | |
161 | __setup("psi=", setup_psi); | |
eb414681 JW |
162 | |
163 | /* Running averages - we need to be higher-res than loadavg */ | |
164 | #define PSI_FREQ (2*HZ+1) /* 2 sec intervals */ | |
165 | #define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */ | |
166 | #define EXP_60s 1981 /* 1/exp(2s/60s) */ | |
167 | #define EXP_300s 2034 /* 1/exp(2s/300s) */ | |
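These constants are e^(-update period / averaging horizon), expressed in the 2048-based (FSHIFT == 11) fixed point used by calc_load() in <linux/sched/loadavg.h>. A standalone sketch to re-derive them (assumes the 2-second PSI_FREQ period; compile with -lm):

```c
#include <math.h>
#include <stdio.h>

int main(void)
{
	const double fixed_1 = 2048.0;	/* 1 << FSHIFT */
	const double period = 2.0;	/* PSI_FREQ in seconds */
	const double horizons[] = { 10.0, 60.0, 300.0 };

	for (int i = 0; i < 3; i++)
		printf("EXP_%.0fs = %.0f\n", horizons[i],
		       exp(-period / horizons[i]) * fixed_1);
	return 0;	/* prints 1677, 1981 and 2034 */
}
```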
168 | ||
0e94682b SB |
169 | /* PSI trigger definitions */ |
170 | #define WINDOW_MIN_US 500000 /* Min window size is 500ms */ | |
171 | #define WINDOW_MAX_US 10000000 /* Max window size is 10s */ | |
172 | #define UPDATES_PER_WINDOW 10 /* 10 updates per window */ | |
173 | ||
eb414681 JW |
174 | /* Sampling frequency in nanoseconds */ |
175 | static u64 psi_period __read_mostly; | |
176 | ||
177 | /* System-level pressure and stall tracking */ | |
178 | static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu); | |
df5ba5be | 179 | struct psi_group psi_system = { |
eb414681 JW |
180 | .pcpu = &system_group_pcpu, |
181 | }; | |
182 | ||
bcc78db6 | 183 | static void psi_avgs_work(struct work_struct *work); |
eb414681 JW |
184 | |
185 | static void group_init(struct psi_group *group) | |
186 | { | |
187 | int cpu; | |
188 | ||
189 | for_each_possible_cpu(cpu) | |
190 | seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq); | |
3dfbe25c JW |
191 | group->avg_last_update = sched_clock(); |
192 | group->avg_next_update = group->avg_last_update + psi_period; | |
bcc78db6 SB |
193 | INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work); |
194 | mutex_init(&group->avgs_lock); | |
0e94682b | 195 | /* Init trigger-related members */ |
0e94682b SB |
196 | mutex_init(&group->trigger_lock); |
197 | INIT_LIST_HEAD(&group->triggers); | |
198 | memset(group->nr_triggers, 0, sizeof(group->nr_triggers)); | |
199 | group->poll_states = 0; | |
200 | group->poll_min_period = U32_MAX; | |
201 | memset(group->polling_total, 0, sizeof(group->polling_total)); | |
202 | group->polling_next_update = ULLONG_MAX; | |
203 | group->polling_until = 0; | |
461daba0 | 204 | rcu_assign_pointer(group->poll_task, NULL); |
eb414681 JW |
205 | } |
206 | ||
207 | void __init psi_init(void) | |
208 | { | |
e0c27447 JW |
209 | if (!psi_enable) { |
210 | static_branch_enable(&psi_disabled); | |
eb414681 | 211 | return; |
e0c27447 | 212 | } |
eb414681 JW |
213 | |
214 | psi_period = jiffies_to_nsecs(PSI_FREQ); | |
215 | group_init(&psi_system); | |
216 | } | |
217 | ||
218 | static bool test_state(unsigned int *tasks, enum psi_states state) | |
219 | { | |
220 | switch (state) { | |
221 | case PSI_IO_SOME: | |
fddc8bab | 222 | return unlikely(tasks[NR_IOWAIT]); |
eb414681 | 223 | case PSI_IO_FULL: |
fddc8bab | 224 | return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]); |
eb414681 | 225 | case PSI_MEM_SOME: |
fddc8bab | 226 | return unlikely(tasks[NR_MEMSTALL]); |
eb414681 | 227 | case PSI_MEM_FULL: |
fddc8bab | 228 | return unlikely(tasks[NR_MEMSTALL] && !tasks[NR_RUNNING]); |
eb414681 | 229 | case PSI_CPU_SOME: |
fddc8bab | 230 | return unlikely(tasks[NR_RUNNING] > tasks[NR_ONCPU]); |
e7fcd762 | 231 | case PSI_CPU_FULL: |
fddc8bab | 232 | return unlikely(tasks[NR_RUNNING] && !tasks[NR_ONCPU]); |
eb414681 JW |
233 | case PSI_NONIDLE: |
234 | return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || | |
235 | tasks[NR_RUNNING]; | |
236 | default: | |
237 | return false; | |
238 | } | |
239 | } | |
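As a concrete reading of these predicates (numbers invented for illustration): a CPU whose task counts are one NR_IOWAIT, zero NR_MEMSTALL, two NR_RUNNING and one NR_ONCPU is in PSI_IO_SOME (a task is blocked on IO), PSI_CPU_SOME (two runnable tasks compete for one execution slot) and PSI_NONIDLE, while every FULL state stays clear because a task is still making progress on the CPU.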
240 | ||
0e94682b SB |
241 | static void get_recent_times(struct psi_group *group, int cpu, |
242 | enum psi_aggregators aggregator, u32 *times, | |
333f3017 | 243 | u32 *pchanged_states) |
eb414681 JW |
244 | { |
245 | struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu); | |
eb414681 | 246 | u64 now, state_start; |
33b2d630 | 247 | enum psi_states s; |
eb414681 | 248 | unsigned int seq; |
33b2d630 | 249 | u32 state_mask; |
eb414681 | 250 | |
333f3017 SB |
251 | *pchanged_states = 0; |
252 | ||
eb414681 JW |
253 | /* Snapshot a coherent view of the CPU state */ |
254 | do { | |
255 | seq = read_seqcount_begin(&groupc->seq); | |
256 | now = cpu_clock(cpu); | |
257 | memcpy(times, groupc->times, sizeof(groupc->times)); | |
33b2d630 | 258 | state_mask = groupc->state_mask; |
eb414681 JW |
259 | state_start = groupc->state_start; |
260 | } while (read_seqcount_retry(&groupc->seq, seq)); | |
261 | ||
262 | /* Calculate state time deltas against the previous snapshot */ | |
263 | for (s = 0; s < NR_PSI_STATES; s++) { | |
264 | u32 delta; | |
265 | /* | |
266 | * In addition to already concluded states, we also | |
267 | * incorporate currently active states on the CPU, | |
268 | * since states may last for many sampling periods. | |
269 | * | |
270 | * This way we keep our delta sampling buckets small | |
271 | * (u32) and our reported pressure close to what's | |
272 | * actually happening. | |
273 | */ | |
33b2d630 | 274 | if (state_mask & (1 << s)) |
eb414681 JW |
275 | times[s] += now - state_start; |
276 | ||
0e94682b SB |
277 | delta = times[s] - groupc->times_prev[aggregator][s]; |
278 | groupc->times_prev[aggregator][s] = times[s]; | |
eb414681 JW |
279 | |
280 | times[s] = delta; | |
333f3017 SB |
281 | if (delta) |
282 | *pchanged_states |= (1 << s); | |
eb414681 JW |
283 | } |
284 | } | |
285 | ||
286 | static void calc_avgs(unsigned long avg[3], int missed_periods, | |
287 | u64 time, u64 period) | |
288 | { | |
289 | unsigned long pct; | |
290 | ||
291 | /* Fill in zeroes for periods of no activity */ | |
292 | if (missed_periods) { | |
293 | avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods); | |
294 | avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods); | |
295 | avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods); | |
296 | } | |
297 | ||
298 | /* Sample the most recent active period */ | |
299 | pct = div_u64(time * 100, period); | |
300 | pct *= FIXED_1; | |
301 | avg[0] = calc_load(avg[0], EXP_10s, pct); | |
302 | avg[1] = calc_load(avg[1], EXP_60s, pct); | |
303 | avg[2] = calc_load(avg[2], EXP_300s, pct); | |
304 | } | |
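calc_load() blends each new sample into the decaying average in fixed point, roughly avg = (avg * EXP + pct * (FIXED_1 - EXP)) / FIXED_1. A rough floating-point sketch of the avg10 update (illustrative only; the kernel uses the fixed-point helpers above):

```c
#include <stdio.h>

/* one 2-second avg10 step: e = exp(-2s/10s) ~= EXP_10s / 2048 */
static double avg10_step(double avg, double sample_pct)
{
	const double e = 1677.0 / 2048.0;

	return avg * e + sample_pct * (1.0 - e);
}

int main(void)
{
	double avg = 0.0;

	/* five consecutive periods at 50% stall, starting from idle */
	for (int i = 0; i < 5; i++)
		avg = avg10_step(avg, 50.0);
	printf("avg10 ~= %.1f%%\n", avg);	/* creeps up toward 50 */
	return 0;
}
```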
305 | ||
0e94682b SB |
306 | static void collect_percpu_times(struct psi_group *group, |
307 | enum psi_aggregators aggregator, | |
308 | u32 *pchanged_states) | |
eb414681 JW |
309 | { |
310 | u64 deltas[NR_PSI_STATES - 1] = { 0, }; | |
eb414681 | 311 | unsigned long nonidle_total = 0; |
333f3017 | 312 | u32 changed_states = 0; |
eb414681 JW |
313 | int cpu; |
314 | int s; | |
315 | ||
eb414681 JW |
316 | /* |
317 | * Collect the per-cpu time buckets and average them into a | |
318 | * single time sample that is normalized to wallclock time. | |
319 | * | |
320 | * For averaging, each CPU is weighted by its non-idle time in | |
321 | * the sampling period. This eliminates artifacts from uneven | |
322 | * loading, or even entirely idle CPUs. | |
323 | */ | |
324 | for_each_possible_cpu(cpu) { | |
325 | u32 times[NR_PSI_STATES]; | |
326 | u32 nonidle; | |
333f3017 | 327 | u32 cpu_changed_states; |
eb414681 | 328 | |
0e94682b | 329 | get_recent_times(group, cpu, aggregator, times, |
333f3017 SB |
330 | &cpu_changed_states); |
331 | changed_states |= cpu_changed_states; | |
eb414681 JW |
332 | |
333 | nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]); | |
334 | nonidle_total += nonidle; | |
335 | ||
336 | for (s = 0; s < PSI_NONIDLE; s++) | |
337 | deltas[s] += (u64)times[s] * nonidle; | |
338 | } | |
339 | ||
340 | /* | |
341 | * Integrate the sample into the running statistics that are | |
342 | * reported to userspace: the cumulative stall times and the | |
343 | * decaying averages. | |
344 | * | |
345 | * Pressure percentages are sampled at PSI_FREQ. We might be | |
346 | * called more often when the user polls more frequently than | |
347 | * that; we might be called less often when there is no task | |
348 | * activity, thus no data, and clock ticks are sporadic. The | |
349 | * below handles both. | |
350 | */ | |
351 | ||
352 | /* total= */ | |
353 | for (s = 0; s < NR_PSI_STATES - 1; s++) | |
0e94682b SB |
354 | group->total[aggregator][s] += |
355 | div_u64(deltas[s], max(nonidle_total, 1UL)); | |
eb414681 | 356 | |
333f3017 SB |
357 | if (pchanged_states) |
358 | *pchanged_states = changed_states; | |
7fc70a39 SB |
359 | } |
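To see what the non-idle weighting buys, take a hypothetical two-CPU sample: CPU0 was non-idle for the full 2s period with 1s of stall, CPU1 was non-idle for only 0.2s with 0.1s of stall. The weighted delta is (1s*2s + 0.1s*0.2s) / (2s + 0.2s) ≈ 0.92s, so the mostly idle CPU barely dilutes the pressure seen on the busy one, whereas a naive per-CPU average would report (1s + 0.1s) / 2 = 0.55s.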
360 | ||
361 | static u64 update_averages(struct psi_group *group, u64 now) | |
362 | { | |
363 | unsigned long missed_periods = 0; | |
364 | u64 expires, period; | |
365 | u64 avg_next_update; | |
366 | int s; | |
367 | ||
eb414681 | 368 | /* avgX= */ |
bcc78db6 | 369 | expires = group->avg_next_update; |
4e37504d | 370 | if (now - expires >= psi_period) |
eb414681 JW |
371 | missed_periods = div_u64(now - expires, psi_period); |
372 | ||
373 | /* | |
374 | * The periodic clock tick can get delayed for various | |
375 | * reasons, especially on loaded systems. To avoid clock | |
376 | * drift, we schedule the clock in fixed psi_period intervals. | |
377 | * But the deltas we sample out of the per-cpu buckets above | |
378 | * are based on the actual time elapsing between clock ticks. | |
379 | */ | |
7fc70a39 | 380 | avg_next_update = expires + ((1 + missed_periods) * psi_period); |
bcc78db6 SB |
381 | period = now - (group->avg_last_update + (missed_periods * psi_period)); |
382 | group->avg_last_update = now; | |
eb414681 JW |
383 | |
384 | for (s = 0; s < NR_PSI_STATES - 1; s++) { | |
385 | u32 sample; | |
386 | ||
0e94682b | 387 | sample = group->total[PSI_AVGS][s] - group->avg_total[s]; |
eb414681 JW |
388 | /* |
389 | * Due to the lockless sampling of the time buckets, | |
390 | * recorded time deltas can slip into the next period, | |
391 | * which under full pressure can result in samples in | |
392 | * excess of the period length. | |
393 | * | |
394 | * We don't want to report non-sensical pressures in | |
395 | * excess of 100%, nor do we want to drop such events | |
396 | * on the floor. Instead we punt any overage into the | |
397 | * future until pressure subsides. By doing this we | |
398 | * don't underreport the occurring pressure curve, we | |
399 | * just report it delayed by one period length. | |
400 | * | |
401 | * The error isn't cumulative. As soon as another | |
402 | * delta slips from a period P to P+1, by definition | |
403 | * it frees up its time T in P. | |
404 | */ | |
405 | if (sample > period) | |
406 | sample = period; | |
bcc78db6 | 407 | group->avg_total[s] += sample; |
eb414681 JW |
408 | calc_avgs(group->avg[s], missed_periods, sample, period); |
409 | } | |
7fc70a39 SB |
410 | |
411 | return avg_next_update; | |
eb414681 JW |
412 | } |
413 | ||
bcc78db6 | 414 | static void psi_avgs_work(struct work_struct *work) |
eb414681 JW |
415 | { |
416 | struct delayed_work *dwork; | |
417 | struct psi_group *group; | |
333f3017 | 418 | u32 changed_states; |
eb414681 | 419 | bool nonidle; |
7fc70a39 | 420 | u64 now; |
eb414681 JW |
421 | |
422 | dwork = to_delayed_work(work); | |
bcc78db6 | 423 | group = container_of(dwork, struct psi_group, avgs_work); |
eb414681 | 424 | |
7fc70a39 SB |
425 | mutex_lock(&group->avgs_lock); |
426 | ||
427 | now = sched_clock(); | |
428 | ||
0e94682b | 429 | collect_percpu_times(group, PSI_AVGS, &changed_states); |
333f3017 | 430 | nonidle = changed_states & (1 << PSI_NONIDLE); |
eb414681 JW |
431 | /* |
432 | * If there is task activity, periodically fold the per-cpu | |
433 | * times and feed samples into the running averages. If things | |
434 | * are idle and there is no data to process, stop the clock. | |
435 | * Once restarted, we'll catch up the running averages in one | |
436 | * go - see calc_avgs() and missed_periods. | |
437 | */ | |
7fc70a39 SB |
438 | if (now >= group->avg_next_update) |
439 | group->avg_next_update = update_averages(group, now); | |
eb414681 JW |
440 | |
441 | if (nonidle) { | |
7fc70a39 SB |
442 | schedule_delayed_work(dwork, nsecs_to_jiffies( |
443 | group->avg_next_update - now) + 1); | |
eb414681 | 444 | } |
7fc70a39 SB |
445 | |
446 | mutex_unlock(&group->avgs_lock); | |
eb414681 JW |
447 | } |
448 | ||
3b03706f | 449 | /* Trigger tracking window manipulations */ |
0e94682b SB |
450 | static void window_reset(struct psi_window *win, u64 now, u64 value, |
451 | u64 prev_growth) | |
452 | { | |
453 | win->start_time = now; | |
454 | win->start_value = value; | |
455 | win->prev_growth = prev_growth; | |
456 | } | |
457 | ||
458 | /* | |
459 | * PSI growth tracking window update and growth calculation routine. | |
460 | * | |
461 | * This approximates a sliding tracking window by interpolating | |
462 | * partially elapsed windows using historical growth data from the | |
463 | * previous intervals. This minimizes memory requirements (by not storing | |
464 | * all the intermediate values in the previous window) and simplifies | |
465 | * the calculations. It works well because PSI signal changes only in | |
466 | * positive direction and over relatively small window sizes the growth | |
467 | * is close to linear. | |
468 | */ | |
469 | static u64 window_update(struct psi_window *win, u64 now, u64 value) | |
470 | { | |
471 | u64 elapsed; | |
472 | u64 growth; | |
473 | ||
474 | elapsed = now - win->start_time; | |
475 | growth = value - win->start_value; | |
476 | /* | |
477 | * After each tracking window passes win->start_value and | |
478 | * win->start_time get reset and win->prev_growth stores | |
479 | * the average per-window growth of the previous window. | |
480 | * win->prev_growth is then used to interpolate additional | |
481 | * growth from the previous window assuming it was linear. | |
482 | */ | |
483 | if (elapsed > win->size) | |
484 | window_reset(win, now, value, growth); | |
485 | else { | |
486 | u32 remaining; | |
487 | ||
488 | remaining = win->size - elapsed; | |
c3466952 | 489 | growth += div64_u64(win->prev_growth * remaining, win->size); |
0e94682b SB |
490 | } |
491 | ||
492 | return growth; | |
493 | } | |
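A quick numeric example (invented values): with a 1s window whose previous instance grew by 100ms of stall, a call made 400ms into the current window with 50ms of new growth returns 50ms + 100ms * 600ms / 1000ms = 110ms, i.e. the partially elapsed window is topped up with a linear share of the previous window's growth.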
494 | ||
495 | static void init_triggers(struct psi_group *group, u64 now) | |
496 | { | |
497 | struct psi_trigger *t; | |
498 | ||
499 | list_for_each_entry(t, &group->triggers, node) | |
500 | window_reset(&t->win, now, | |
501 | group->total[PSI_POLL][t->state], 0); | |
502 | memcpy(group->polling_total, group->total[PSI_POLL], | |
503 | sizeof(group->polling_total)); | |
504 | group->polling_next_update = now + group->poll_min_period; | |
505 | } | |
506 | ||
507 | static u64 update_triggers(struct psi_group *group, u64 now) | |
508 | { | |
509 | struct psi_trigger *t; | |
510 | bool new_stall = false; | |
511 | u64 *total = group->total[PSI_POLL]; | |
512 | ||
513 | /* | |
514 | * On subsequent updates, calculate growth deltas and let | |
515 | * watchers know when their specified thresholds are exceeded. | |
516 | */ | |
517 | list_for_each_entry(t, &group->triggers, node) { | |
518 | u64 growth; | |
519 | ||
520 | /* Check for stall activity */ | |
521 | if (group->polling_total[t->state] == total[t->state]) | |
522 | continue; | |
523 | ||
524 | /* | |
525 | * Multiple triggers might be looking at the same state, | |
526 | * remember to update group->polling_total[] once we've | |
527 | * been through all of them. Also remember to extend the | |
528 | * polling time if we see new stall activity. | |
529 | */ | |
530 | new_stall = true; | |
531 | ||
532 | /* Calculate growth since last update */ | |
533 | growth = window_update(&t->win, now, total[t->state]); | |
534 | if (growth < t->threshold) | |
535 | continue; | |
536 | ||
537 | /* Limit event signaling to once per window */ | |
538 | if (now < t->last_event_time + t->win.size) | |
539 | continue; | |
540 | ||
541 | /* Generate an event */ | |
542 | if (cmpxchg(&t->event, 0, 1) == 0) | |
543 | wake_up_interruptible(&t->event_wait); | |
544 | t->last_event_time = now; | |
545 | } | |
546 | ||
547 | if (new_stall) | |
548 | memcpy(group->polling_total, total, | |
549 | sizeof(group->polling_total)); | |
550 | ||
551 | return now + group->poll_min_period; | |
552 | } | |
553 | ||
461daba0 | 554 | /* Schedule polling if it's not already scheduled. */ |
0e94682b SB |
555 | static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay) |
556 | { | |
461daba0 | 557 | struct task_struct *task; |
0e94682b | 558 | |
461daba0 SB |
559 | /* |
560 | * Do not reschedule if already scheduled. | |
561 | * Possible race with a timer scheduled after this check but before | |
562 | * mod_timer below can be tolerated because group->polling_next_update | |
563 | * will keep updates on schedule. | |
564 | */ | |
565 | if (timer_pending(&group->poll_timer)) | |
0e94682b SB |
566 | return; |
567 | ||
568 | rcu_read_lock(); | |
569 | ||
461daba0 | 570 | task = rcu_dereference(group->poll_task); |
0e94682b SB |
571 | /* |
572 | * The poll task might be NULL in case psi_trigger_destroy() races with | 
573 | * psi_task_change() (hotpath), which can't use locks | 
574 | */ | |
461daba0 SB |
575 | if (likely(task)) |
576 | mod_timer(&group->poll_timer, jiffies + delay); | |
0e94682b SB |
577 | |
578 | rcu_read_unlock(); | |
579 | } | |
580 | ||
461daba0 | 581 | static void psi_poll_work(struct psi_group *group) |
0e94682b | 582 | { |
0e94682b SB |
583 | u32 changed_states; |
584 | u64 now; | |
585 | ||
0e94682b SB |
586 | mutex_lock(&group->trigger_lock); |
587 | ||
588 | now = sched_clock(); | |
589 | ||
590 | collect_percpu_times(group, PSI_POLL, &changed_states); | |
591 | ||
592 | if (changed_states & group->poll_states) { | |
593 | /* Initialize trigger windows when entering polling mode */ | |
594 | if (now > group->polling_until) | |
595 | init_triggers(group, now); | |
596 | ||
597 | /* | |
598 | * Keep the monitor active for at least the duration of the | |
599 | * minimum tracking window as long as monitor states are | |
600 | * changing. | |
601 | */ | |
602 | group->polling_until = now + | |
603 | group->poll_min_period * UPDATES_PER_WINDOW; | |
604 | } | |
605 | ||
606 | if (now > group->polling_until) { | |
607 | group->polling_next_update = ULLONG_MAX; | |
608 | goto out; | |
609 | } | |
610 | ||
611 | if (now >= group->polling_next_update) | |
612 | group->polling_next_update = update_triggers(group, now); | |
613 | ||
614 | psi_schedule_poll_work(group, | |
615 | nsecs_to_jiffies(group->polling_next_update - now) + 1); | |
616 | ||
617 | out: | |
618 | mutex_unlock(&group->trigger_lock); | |
619 | } | |
620 | ||
461daba0 SB |
621 | static int psi_poll_worker(void *data) |
622 | { | |
623 | struct psi_group *group = (struct psi_group *)data; | |
461daba0 | 624 | |
2cca5426 | 625 | sched_set_fifo_low(current); |
461daba0 SB |
626 | |
627 | while (true) { | |
628 | wait_event_interruptible(group->poll_wait, | |
629 | atomic_cmpxchg(&group->poll_wakeup, 1, 0) || | |
630 | kthread_should_stop()); | |
631 | if (kthread_should_stop()) | |
632 | break; | |
633 | ||
634 | psi_poll_work(group); | |
635 | } | |
636 | return 0; | |
637 | } | |
638 | ||
639 | static void poll_timer_fn(struct timer_list *t) | |
640 | { | |
641 | struct psi_group *group = from_timer(group, t, poll_timer); | |
642 | ||
643 | atomic_set(&group->poll_wakeup, 1); | |
644 | wake_up_interruptible(&group->poll_wait); | |
645 | } | |
646 | ||
df774306 | 647 | static void record_times(struct psi_group_cpu *groupc, u64 now) |
eb414681 JW |
648 | { |
649 | u32 delta; | |
eb414681 | 650 | |
eb414681 JW |
651 | delta = now - groupc->state_start; |
652 | groupc->state_start = now; | |
653 | ||
33b2d630 | 654 | if (groupc->state_mask & (1 << PSI_IO_SOME)) { |
eb414681 | 655 | groupc->times[PSI_IO_SOME] += delta; |
33b2d630 | 656 | if (groupc->state_mask & (1 << PSI_IO_FULL)) |
eb414681 JW |
657 | groupc->times[PSI_IO_FULL] += delta; |
658 | } | |
659 | ||
33b2d630 | 660 | if (groupc->state_mask & (1 << PSI_MEM_SOME)) { |
eb414681 | 661 | groupc->times[PSI_MEM_SOME] += delta; |
33b2d630 | 662 | if (groupc->state_mask & (1 << PSI_MEM_FULL)) |
eb414681 | 663 | groupc->times[PSI_MEM_FULL] += delta; |
eb414681 JW |
664 | } |
665 | ||
e7fcd762 | 666 | if (groupc->state_mask & (1 << PSI_CPU_SOME)) { |
eb414681 | 667 | groupc->times[PSI_CPU_SOME] += delta; |
e7fcd762 CZ |
668 | if (groupc->state_mask & (1 << PSI_CPU_FULL)) |
669 | groupc->times[PSI_CPU_FULL] += delta; | |
670 | } | |
eb414681 | 671 | |
33b2d630 | 672 | if (groupc->state_mask & (1 << PSI_NONIDLE)) |
eb414681 JW |
673 | groupc->times[PSI_NONIDLE] += delta; |
674 | } | |
675 | ||
36b238d5 | 676 | static void psi_group_change(struct psi_group *group, int cpu, |
df774306 | 677 | unsigned int clear, unsigned int set, u64 now, |
36b238d5 | 678 | bool wake_clock) |
eb414681 JW |
679 | { |
680 | struct psi_group_cpu *groupc; | |
36b238d5 | 681 | u32 state_mask = 0; |
eb414681 | 682 | unsigned int t, m; |
33b2d630 | 683 | enum psi_states s; |
eb414681 JW |
684 | |
685 | groupc = per_cpu_ptr(group->pcpu, cpu); | |
686 | ||
687 | /* | |
688 | * First we assess the aggregate resource states this CPU's | |
689 | * tasks have been in since the last change, and account any | |
690 | * SOME and FULL time these may have resulted in. | |
691 | * | |
692 | * Then we update the task counts according to the state | |
693 | * change requested through the @clear and @set bits. | |
694 | */ | |
695 | write_seqcount_begin(&groupc->seq); | |
696 | ||
df774306 | 697 | record_times(groupc, now); |
eb414681 JW |
698 | |
699 | for (t = 0, m = clear; m; m &= ~(1 << t), t++) { | |
700 | if (!(m & (1 << t))) | |
701 | continue; | |
9d10a13d CTR |
702 | if (groupc->tasks[t]) { |
703 | groupc->tasks[t]--; | |
704 | } else if (!psi_bug) { | |
b05e75d6 | 705 | printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n", |
eb414681 JW |
706 | cpu, t, groupc->tasks[0], |
707 | groupc->tasks[1], groupc->tasks[2], | |
b05e75d6 | 708 | groupc->tasks[3], clear, set); |
eb414681 JW |
709 | psi_bug = 1; |
710 | } | |
eb414681 JW |
711 | } |
712 | ||
713 | for (t = 0; set; set &= ~(1 << t), t++) | |
714 | if (set & (1 << t)) | |
715 | groupc->tasks[t]++; | |
716 | ||
33b2d630 SB |
717 | /* Calculate state mask representing active states */ |
718 | for (s = 0; s < NR_PSI_STATES; s++) { | |
719 | if (test_state(groupc->tasks, s)) | |
720 | state_mask |= (1 << s); | |
721 | } | |
7fae6c81 CZ |
722 | |
723 | /* | |
724 | * Since we care about lost potential, a memstall is FULL | |
725 | * when there are no other working tasks, but also when | |
726 | * the CPU is actively reclaiming and nothing productive | |
727 | * could run even if it were runnable. So when the current | |
728 | * task in a cgroup is in_memstall, the corresponding groupc | |
729 | * on that cpu is in PSI_MEM_FULL state. | |
730 | */ | |
fddc8bab | 731 | if (unlikely(groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall)) |
7fae6c81 CZ |
732 | state_mask |= (1 << PSI_MEM_FULL); |
733 | ||
33b2d630 SB |
734 | groupc->state_mask = state_mask; |
735 | ||
eb414681 | 736 | write_seqcount_end(&groupc->seq); |
0e94682b | 737 | |
36b238d5 JW |
738 | if (state_mask & group->poll_states) |
739 | psi_schedule_poll_work(group, 1); | |
740 | ||
741 | if (wake_clock && !delayed_work_pending(&group->avgs_work)) | |
742 | schedule_delayed_work(&group->avgs_work, PSI_FREQ); | |
eb414681 JW |
743 | } |
744 | ||
2ce7135a JW |
745 | static struct psi_group *iterate_groups(struct task_struct *task, void **iter) |
746 | { | |
747 | #ifdef CONFIG_CGROUPS | |
748 | struct cgroup *cgroup = NULL; | |
749 | ||
750 | if (!*iter) | |
751 | cgroup = task->cgroups->dfl_cgrp; | |
752 | else if (*iter == &psi_system) | |
753 | return NULL; | |
754 | else | |
755 | cgroup = cgroup_parent(*iter); | |
756 | ||
757 | if (cgroup && cgroup_parent(cgroup)) { | |
758 | *iter = cgroup; | |
759 | return cgroup_psi(cgroup); | |
760 | } | |
761 | #else | |
762 | if (*iter) | |
763 | return NULL; | |
764 | #endif | |
765 | *iter = &psi_system; | |
766 | return &psi_system; | |
767 | } | |
768 | ||
36b238d5 | 769 | static void psi_flags_change(struct task_struct *task, int clear, int set) |
eb414681 | 770 | { |
eb414681 JW |
771 | if (((task->psi_flags & set) || |
772 | (task->psi_flags & clear) != clear) && | |
773 | !psi_bug) { | |
774 | printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n", | |
36b238d5 | 775 | task->pid, task->comm, task_cpu(task), |
eb414681 JW |
776 | task->psi_flags, clear, set); |
777 | psi_bug = 1; | |
778 | } | |
779 | ||
780 | task->psi_flags &= ~clear; | |
781 | task->psi_flags |= set; | |
36b238d5 JW |
782 | } |
783 | ||
784 | void psi_task_change(struct task_struct *task, int clear, int set) | |
785 | { | |
786 | int cpu = task_cpu(task); | |
787 | struct psi_group *group; | |
788 | bool wake_clock = true; | |
789 | void *iter = NULL; | |
df774306 | 790 | u64 now; |
36b238d5 JW |
791 | |
792 | if (!task->pid) | |
793 | return; | |
794 | ||
795 | psi_flags_change(task, clear, set); | |
eb414681 | 796 | |
df774306 | 797 | now = cpu_clock(cpu); |
1b69ac6b JW |
798 | /* |
799 | * Periodic aggregation shuts off if there is a period of no | |
800 | * task changes, so we wake it back up if necessary. However, | |
801 | * don't do this if the task change is the aggregation worker | |
802 | * itself going to sleep, or we'll ping-pong forever. | |
803 | */ | |
804 | if (unlikely((clear & TSK_RUNNING) && | |
805 | (task->flags & PF_WQ_WORKER) && | |
bcc78db6 | 806 | wq_worker_last_func(task) == psi_avgs_work)) |
1b69ac6b JW |
807 | wake_clock = false; |
808 | ||
36b238d5 | 809 | while ((group = iterate_groups(task, &iter))) |
df774306 | 810 | psi_group_change(group, cpu, clear, set, now, wake_clock); |
36b238d5 JW |
811 | } |
812 | ||
813 | void psi_task_switch(struct task_struct *prev, struct task_struct *next, | |
814 | bool sleep) | |
815 | { | |
816 | struct psi_group *group, *common = NULL; | |
817 | int cpu = task_cpu(prev); | |
818 | void *iter; | |
df774306 | 819 | u64 now = cpu_clock(cpu); |
36b238d5 JW |
820 | |
821 | if (next->pid) { | |
7fae6c81 CZ |
822 | bool identical_state; |
823 | ||
36b238d5 JW |
824 | psi_flags_change(next, 0, TSK_ONCPU); |
825 | /* | |
7fae6c81 CZ |
826 | * When switching between tasks that have an identical
827 | * runtime state, the cgroups that contain both tasks see | 
828 | * no state change, so we can stop updating the tree once | 
829 | * we reach the first common ancestor. Iterate @next's | 
830 | * ancestors only until we encounter @prev's ONCPU. | 
36b238d5 | 831 | */ |
7fae6c81 | 832 | identical_state = prev->psi_flags == next->psi_flags; |
36b238d5 JW |
833 | iter = NULL; |
834 | while ((group = iterate_groups(next, &iter))) { | |
7fae6c81 CZ |
835 | if (identical_state && |
836 | per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) { | |
36b238d5 JW |
837 | common = group; |
838 | break; | |
839 | } | |
840 | ||
df774306 | 841 | psi_group_change(group, cpu, 0, TSK_ONCPU, now, true); |
36b238d5 JW |
842 | } |
843 | } | |
844 | ||
36b238d5 | 845 | if (prev->pid) { |
4117cebf CZ |
846 | int clear = TSK_ONCPU, set = 0; |
847 | ||
848 | /* | |
849 | * When we're going to sleep, psi_dequeue() lets us handle | |
850 | * TSK_RUNNING and TSK_IOWAIT here, where we can combine it | |
851 | * with TSK_ONCPU and save walking common ancestors twice. | |
852 | */ | |
853 | if (sleep) { | |
854 | clear |= TSK_RUNNING; | |
855 | if (prev->in_iowait) | |
856 | set |= TSK_IOWAIT; | |
857 | } | |
858 | ||
859 | psi_flags_change(prev, clear, set); | |
0e94682b | 860 | |
36b238d5 JW |
861 | iter = NULL; |
862 | while ((group = iterate_groups(prev, &iter)) && group != common) | |
df774306 | 863 | psi_group_change(group, cpu, clear, set, now, true); |
4117cebf CZ |
864 | |
865 | /* | |
866 | * TSK_ONCPU is handled up to the common ancestor. If we're tasked | |
867 | * with dequeuing too, finish that for the rest of the hierarchy. | |
868 | */ | |
869 | if (sleep) { | |
870 | clear &= ~TSK_ONCPU; | |
871 | for (; group; group = iterate_groups(prev, &iter)) | |
df774306 | 872 | psi_group_change(group, cpu, clear, set, now, true); |
4117cebf | 873 | } |
1b69ac6b | 874 | } |
eb414681 JW |
875 | } |
876 | ||
eb414681 JW |
877 | /** |
878 | * psi_memstall_enter - mark the beginning of a memory stall section | |
879 | * @flags: flags to handle nested sections | |
880 | * | |
881 | * Marks the calling task as being stalled due to a lack of memory, | |
882 | * such as waiting for a refault or performing reclaim. | |
883 | */ | |
884 | void psi_memstall_enter(unsigned long *flags) | |
885 | { | |
886 | struct rq_flags rf; | |
887 | struct rq *rq; | |
888 | ||
e0c27447 | 889 | if (static_branch_likely(&psi_disabled)) |
eb414681 JW |
890 | return; |
891 | ||
1066d1b6 | 892 | *flags = current->in_memstall; |
eb414681 JW |
893 | if (*flags) |
894 | return; | |
895 | /* | |
1066d1b6 | 896 | * in_memstall setting & accounting needs to be atomic wrt |
eb414681 JW |
897 | * changes to the task's scheduling state, otherwise we can |
898 | * race with CPU migration. | |
899 | */ | |
900 | rq = this_rq_lock_irq(&rf); | |
901 | ||
1066d1b6 | 902 | current->in_memstall = 1; |
eb414681 JW |
903 | psi_task_change(current, 0, TSK_MEMSTALL); |
904 | ||
905 | rq_unlock_irq(rq, &rf); | |
906 | } | |
907 | ||
908 | /** | |
909 | * psi_memstall_leave - mark the end of a memory stall section | 
910 | * @flags: flags to handle nested memdelay sections | |
911 | * | |
912 | * Marks the calling task as no longer stalled due to lack of memory. | |
913 | */ | |
914 | void psi_memstall_leave(unsigned long *flags) | |
915 | { | |
916 | struct rq_flags rf; | |
917 | struct rq *rq; | |
918 | ||
e0c27447 | 919 | if (static_branch_likely(&psi_disabled)) |
eb414681 JW |
920 | return; |
921 | ||
922 | if (*flags) | |
923 | return; | |
924 | /* | |
1066d1b6 | 925 | * in_memstall clearing & accounting needs to be atomic wrt |
eb414681 JW |
926 | * changes to the task's scheduling state, otherwise we could |
927 | * race with CPU migration. | |
928 | */ | |
929 | rq = this_rq_lock_irq(&rf); | |
930 | ||
1066d1b6 | 931 | current->in_memstall = 0; |
eb414681 JW |
932 | psi_task_change(current, TSK_MEMSTALL, 0); |
933 | ||
934 | rq_unlock_irq(rq, &rf); | |
935 | } | |
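Callers bracket memory-stall sections with this pair of helpers; the flags cookie keeps nesting safe. A sketch of the usual calling pattern (the blocking work in the middle is a stand-in, not code from this file):

```c
static void example_reclaim_path(void)
{
	unsigned long pflags;

	psi_memstall_enter(&pflags);
	/* ... block on direct reclaim, compaction or a refaulting page ... */
	psi_memstall_leave(&pflags);
}
```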
936 | ||
2ce7135a JW |
937 | #ifdef CONFIG_CGROUPS |
938 | int psi_cgroup_alloc(struct cgroup *cgroup) | |
939 | { | |
e0c27447 | 940 | if (static_branch_likely(&psi_disabled)) |
2ce7135a JW |
941 | return 0; |
942 | ||
943 | cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu); | |
944 | if (!cgroup->psi.pcpu) | |
945 | return -ENOMEM; | |
946 | group_init(&cgroup->psi); | |
947 | return 0; | |
948 | } | |
949 | ||
950 | void psi_cgroup_free(struct cgroup *cgroup) | |
951 | { | |
e0c27447 | 952 | if (static_branch_likely(&psi_disabled)) |
2ce7135a JW |
953 | return; |
954 | ||
bcc78db6 | 955 | cancel_delayed_work_sync(&cgroup->psi.avgs_work); |
2ce7135a | 956 | free_percpu(cgroup->psi.pcpu); |
0e94682b SB |
957 | /* All triggers must be removed by now */ |
958 | WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n"); | |
2ce7135a JW |
959 | } |
960 | ||
961 | /** | |
962 | * cgroup_move_task - move task to a different cgroup | |
963 | * @task: the task | |
964 | * @to: the target css_set | |
965 | * | |
966 | * Move task to a new cgroup and safely migrate its associated stall | |
967 | * state between the different groups. | |
968 | * | |
969 | * This function acquires the task's rq lock to lock out concurrent | |
970 | * changes to the task's scheduling state and - in case the task is | |
971 | * running - concurrent changes to its stall state. | |
972 | */ | |
973 | void cgroup_move_task(struct task_struct *task, struct css_set *to) | |
974 | { | |
d583d360 | 975 | unsigned int task_flags; |
2ce7135a JW |
976 | struct rq_flags rf; |
977 | struct rq *rq; | |
978 | ||
e0c27447 | 979 | if (static_branch_likely(&psi_disabled)) { |
8fcb2312 OJ |
980 | /* |
981 | * Lame to do this here, but the scheduler cannot be locked | |
982 | * from the outside, so we move cgroups from inside sched/. | |
983 | */ | |
984 | rcu_assign_pointer(task->cgroups, to); | |
985 | return; | |
986 | } | |
2ce7135a | 987 | |
8fcb2312 | 988 | rq = task_rq_lock(task, &rf); |
2ce7135a | 989 | |
d583d360 JW |
990 | /* |
991 | * We may race with schedule() dropping the rq lock between | |
992 | * deactivating prev and switching to next. Because the psi | |
993 | * updates from the deactivation are deferred to the switch | |
994 | * callback to save cgroup tree updates, the task's scheduling | |
995 | * state here is not coherent with its psi state: | |
996 | * | |
997 | * schedule() cgroup_move_task() | |
998 | * rq_lock() | |
999 | * deactivate_task() | |
1000 | * p->on_rq = 0 | |
1001 | * psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates | |
1002 | * pick_next_task() | |
1003 | * rq_unlock() | |
1004 | * rq_lock() | |
1005 | * psi_task_change() // old cgroup | |
1006 | * task->cgroups = to | |
1007 | * psi_task_change() // new cgroup | |
1008 | * rq_unlock() | |
1009 | * rq_lock() | |
1010 | * psi_sched_switch() // does deferred updates in new cgroup | |
1011 | * | |
1012 | * Don't rely on the scheduling state. Use psi_flags instead. | |
1013 | */ | |
1014 | task_flags = task->psi_flags; | |
2ce7135a | 1015 | |
8fcb2312 OJ |
1016 | if (task_flags) |
1017 | psi_task_change(task, task_flags, 0); | |
1018 | ||
1019 | /* See comment above */ | |
2ce7135a JW |
1020 | rcu_assign_pointer(task->cgroups, to); |
1021 | ||
8fcb2312 OJ |
1022 | if (task_flags) |
1023 | psi_task_change(task, 0, task_flags); | |
2ce7135a | 1024 | |
8fcb2312 | 1025 | task_rq_unlock(rq, task, &rf); |
2ce7135a JW |
1026 | } |
1027 | #endif /* CONFIG_CGROUPS */ | |
1028 | ||
1029 | int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res) | |
eb414681 JW |
1030 | { |
1031 | int full; | |
7fc70a39 | 1032 | u64 now; |
eb414681 | 1033 | |
e0c27447 | 1034 | if (static_branch_likely(&psi_disabled)) |
eb414681 JW |
1035 | return -EOPNOTSUPP; |
1036 | ||
7fc70a39 SB |
1037 | /* Update averages before reporting them */ |
1038 | mutex_lock(&group->avgs_lock); | |
1039 | now = sched_clock(); | |
0e94682b | 1040 | collect_percpu_times(group, PSI_AVGS, NULL); |
7fc70a39 SB |
1041 | if (now >= group->avg_next_update) |
1042 | group->avg_next_update = update_averages(group, now); | |
1043 | mutex_unlock(&group->avgs_lock); | |
eb414681 | 1044 | |
e7fcd762 | 1045 | for (full = 0; full < 2; full++) { |
eb414681 JW |
1046 | unsigned long avg[3]; |
1047 | u64 total; | |
1048 | int w; | |
1049 | ||
1050 | for (w = 0; w < 3; w++) | |
1051 | avg[w] = group->avg[res * 2 + full][w]; | |
0e94682b SB |
1052 | total = div_u64(group->total[PSI_AVGS][res * 2 + full], |
1053 | NSEC_PER_USEC); | |
eb414681 JW |
1054 | |
1055 | seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n", | |
1056 | full ? "full" : "some", | |
1057 | LOAD_INT(avg[0]), LOAD_FRAC(avg[0]), | |
1058 | LOAD_INT(avg[1]), LOAD_FRAC(avg[1]), | |
1059 | LOAD_INT(avg[2]), LOAD_FRAC(avg[2]), | |
1060 | total); | |
1061 | } | |
1062 | ||
1063 | return 0; | |
1064 | } | |
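The resulting /proc/pressure/{io,memory,cpu} files (and the io.pressure, memory.pressure and cpu.pressure cgroup files) then read like the following; the numbers are invented sample values, with total reported in microseconds:

```
some avg10=2.04 avg60=0.75 avg300=0.40 total=157622151
full avg10=1.23 avg60=0.42 avg300=0.25 total=93021345
```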
1065 | ||
1066 | static int psi_io_show(struct seq_file *m, void *v) | |
1067 | { | |
1068 | return psi_show(m, &psi_system, PSI_IO); | |
1069 | } | |
1070 | ||
1071 | static int psi_memory_show(struct seq_file *m, void *v) | |
1072 | { | |
1073 | return psi_show(m, &psi_system, PSI_MEM); | |
1074 | } | |
1075 | ||
1076 | static int psi_cpu_show(struct seq_file *m, void *v) | |
1077 | { | |
1078 | return psi_show(m, &psi_system, PSI_CPU); | |
1079 | } | |
1080 | ||
6db12ee0 JH |
1081 | static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *)) |
1082 | { | |
1083 | if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE)) | |
1084 | return -EPERM; | |
1085 | ||
1086 | return single_open(file, psi_show, NULL); | |
1087 | } | |
1088 | ||
eb414681 JW |
1089 | static int psi_io_open(struct inode *inode, struct file *file) |
1090 | { | |
6db12ee0 | 1091 | return psi_open(file, psi_io_show); |
eb414681 JW |
1092 | } |
1093 | ||
1094 | static int psi_memory_open(struct inode *inode, struct file *file) | |
1095 | { | |
6db12ee0 | 1096 | return psi_open(file, psi_memory_show); |
eb414681 JW |
1097 | } |
1098 | ||
1099 | static int psi_cpu_open(struct inode *inode, struct file *file) | |
1100 | { | |
6db12ee0 | 1101 | return psi_open(file, psi_cpu_show); |
eb414681 JW |
1102 | } |
1103 | ||
0e94682b SB |
1104 | struct psi_trigger *psi_trigger_create(struct psi_group *group, |
1105 | char *buf, size_t nbytes, enum psi_res res) | |
1106 | { | |
1107 | struct psi_trigger *t; | |
1108 | enum psi_states state; | |
1109 | u32 threshold_us; | |
1110 | u32 window_us; | |
1111 | ||
1112 | if (static_branch_likely(&psi_disabled)) | |
1113 | return ERR_PTR(-EOPNOTSUPP); | |
1114 | ||
1115 | if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2) | |
1116 | state = PSI_IO_SOME + res * 2; | |
1117 | else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2) | |
1118 | state = PSI_IO_FULL + res * 2; | |
1119 | else | |
1120 | return ERR_PTR(-EINVAL); | |
1121 | ||
1122 | if (state >= PSI_NONIDLE) | |
1123 | return ERR_PTR(-EINVAL); | |
1124 | ||
1125 | if (window_us < WINDOW_MIN_US || | |
1126 | window_us > WINDOW_MAX_US) | |
1127 | return ERR_PTR(-EINVAL); | |
1128 | ||
1129 | /* Check threshold */ | |
1130 | if (threshold_us == 0 || threshold_us > window_us) | |
1131 | return ERR_PTR(-EINVAL); | |
1132 | ||
1133 | t = kmalloc(sizeof(*t), GFP_KERNEL); | |
1134 | if (!t) | |
1135 | return ERR_PTR(-ENOMEM); | |
1136 | ||
1137 | t->group = group; | |
1138 | t->state = state; | |
1139 | t->threshold = threshold_us * NSEC_PER_USEC; | |
1140 | t->win.size = window_us * NSEC_PER_USEC; | |
1141 | window_reset(&t->win, 0, 0, 0); | |
1142 | ||
1143 | t->event = 0; | |
1144 | t->last_event_time = 0; | |
1145 | init_waitqueue_head(&t->event_wait); | |
1146 | kref_init(&t->refcount); | |
1147 | ||
1148 | mutex_lock(&group->trigger_lock); | |
1149 | ||
461daba0 SB |
1150 | if (!rcu_access_pointer(group->poll_task)) { |
1151 | struct task_struct *task; | |
0e94682b | 1152 | |
461daba0 SB |
1153 | task = kthread_create(psi_poll_worker, group, "psimon"); |
1154 | if (IS_ERR(task)) { | |
0e94682b SB |
1155 | kfree(t); |
1156 | mutex_unlock(&group->trigger_lock); | |
461daba0 | 1157 | return ERR_CAST(task); |
0e94682b | 1158 | } |
461daba0 SB |
1159 | atomic_set(&group->poll_wakeup, 0); |
1160 | init_waitqueue_head(&group->poll_wait); | |
1161 | wake_up_process(task); | |
1162 | timer_setup(&group->poll_timer, poll_timer_fn, 0); | |
1163 | rcu_assign_pointer(group->poll_task, task); | |
0e94682b SB |
1164 | } |
1165 | ||
1166 | list_add(&t->node, &group->triggers); | |
1167 | group->poll_min_period = min(group->poll_min_period, | |
1168 | div_u64(t->win.size, UPDATES_PER_WINDOW)); | |
1169 | group->nr_triggers[t->state]++; | |
1170 | group->poll_states |= (1 << t->state); | |
1171 | ||
1172 | mutex_unlock(&group->trigger_lock); | |
1173 | ||
1174 | return t; | |
1175 | } | |
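From userspace, a trigger is armed by writing "some <threshold_us> <window_us>" (or "full ...") to one of the pressure files and then poll()ing the same fd for POLLPRI. A minimal sketch along the lines of the documented usage (error handling trimmed; the 150ms-per-1s threshold is just an example):

```c
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 1000000"; /* 150ms stall per 1s window */
	struct pollfd pfd;
	int fd;

	fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fd < 0 || write(fd, trig, strlen(trig) + 1) < 0) {
		perror("psi trigger");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLPRI;
	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & POLLPRI)
			printf("memory pressure threshold crossed\n");
	}
	return 0;
}
```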
1176 | ||
1177 | static void psi_trigger_destroy(struct kref *ref) | |
1178 | { | |
1179 | struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount); | |
1180 | struct psi_group *group = t->group; | |
461daba0 | 1181 | struct task_struct *task_to_destroy = NULL; |
0e94682b SB |
1182 | |
1183 | if (static_branch_likely(&psi_disabled)) | |
1184 | return; | |
1185 | ||
1186 | /* | |
1187 | * Wakeup waiters to stop polling. Can happen if cgroup is deleted | |
1188 | * from under a polling process. | |
1189 | */ | |
1190 | wake_up_interruptible(&t->event_wait); | |
1191 | ||
1192 | mutex_lock(&group->trigger_lock); | |
1193 | ||
1194 | if (!list_empty(&t->node)) { | |
1195 | struct psi_trigger *tmp; | |
1196 | u64 period = ULLONG_MAX; | |
1197 | ||
1198 | list_del(&t->node); | |
1199 | group->nr_triggers[t->state]--; | |
1200 | if (!group->nr_triggers[t->state]) | |
1201 | group->poll_states &= ~(1 << t->state); | |
1202 | /* reset min update period for the remaining triggers */ | |
1203 | list_for_each_entry(tmp, &group->triggers, node) | |
1204 | period = min(period, div_u64(tmp->win.size, | |
1205 | UPDATES_PER_WINDOW)); | |
1206 | group->poll_min_period = period; | |
461daba0 | 1207 | /* Destroy poll_task when the last trigger is destroyed */ |
0e94682b SB |
1208 | if (group->poll_states == 0) { |
1209 | group->polling_until = 0; | |
461daba0 SB |
1210 | task_to_destroy = rcu_dereference_protected( |
1211 | group->poll_task, | |
0e94682b | 1212 | lockdep_is_held(&group->trigger_lock)); |
461daba0 | 1213 | rcu_assign_pointer(group->poll_task, NULL); |
0e94682b SB |
1214 | } |
1215 | } | |
1216 | ||
1217 | mutex_unlock(&group->trigger_lock); | |
1218 | ||
1219 | /* | |
1220 | * Wait for both *trigger_ptr from psi_trigger_replace and | |
461daba0 SB |
1221 | * poll_task RCUs to complete their read-side critical sections |
1222 | * before destroying the trigger and optionally the poll_task | |
0e94682b SB |
1223 | */ |
1224 | synchronize_rcu(); | |
1225 | /* | |
1226 | * Stop the poll kthread after releasing trigger_lock to prevent a | 
1227 | * deadlock while waiting for psi_poll_work to acquire trigger_lock | 
1228 | */ | |
461daba0 | 1229 | if (task_to_destroy) { |
7b2b55da JX |
1230 | /* |
1231 | * After the RCU grace period has expired, the worker | |
461daba0 | 1232 | * can no longer be found through group->poll_task. |
7b2b55da JX |
1233 | * But it might have been already scheduled before |
1234 | * that - deschedule it cleanly before destroying it. | |
1235 | */ | |
461daba0 SB |
1236 | del_timer_sync(&group->poll_timer); |
1237 | kthread_stop(task_to_destroy); | |
0e94682b SB |
1238 | } |
1239 | kfree(t); | |
1240 | } | |
1241 | ||
1242 | void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new) | |
1243 | { | |
1244 | struct psi_trigger *old = *trigger_ptr; | |
1245 | ||
1246 | if (static_branch_likely(&psi_disabled)) | |
1247 | return; | |
1248 | ||
1249 | rcu_assign_pointer(*trigger_ptr, new); | |
1250 | if (old) | |
1251 | kref_put(&old->refcount, psi_trigger_destroy); | |
1252 | } | |
1253 | ||
1254 | __poll_t psi_trigger_poll(void **trigger_ptr, | |
1255 | struct file *file, poll_table *wait) | |
1256 | { | |
1257 | __poll_t ret = DEFAULT_POLLMASK; | |
1258 | struct psi_trigger *t; | |
1259 | ||
1260 | if (static_branch_likely(&psi_disabled)) | |
1261 | return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI; | |
1262 | ||
1263 | rcu_read_lock(); | |
1264 | ||
1265 | t = rcu_dereference(*(void __rcu __force **)trigger_ptr); | |
1266 | if (!t) { | |
1267 | rcu_read_unlock(); | |
1268 | return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI; | |
1269 | } | |
1270 | kref_get(&t->refcount); | |
1271 | ||
1272 | rcu_read_unlock(); | |
1273 | ||
1274 | poll_wait(file, &t->event_wait, wait); | |
1275 | ||
1276 | if (cmpxchg(&t->event, 1, 0) == 1) | |
1277 | ret |= EPOLLPRI; | |
1278 | ||
1279 | kref_put(&t->refcount, psi_trigger_destroy); | |
1280 | ||
1281 | return ret; | |
1282 | } | |
1283 | ||
1284 | static ssize_t psi_write(struct file *file, const char __user *user_buf, | |
1285 | size_t nbytes, enum psi_res res) | |
1286 | { | |
1287 | char buf[32]; | |
1288 | size_t buf_size; | |
1289 | struct seq_file *seq; | |
1290 | struct psi_trigger *new; | |
1291 | ||
1292 | if (static_branch_likely(&psi_disabled)) | |
1293 | return -EOPNOTSUPP; | |
1294 | ||
6fcca0fa SB |
1295 | if (!nbytes) |
1296 | return -EINVAL; | |
1297 | ||
4adcdcea | 1298 | buf_size = min(nbytes, sizeof(buf)); |
0e94682b SB |
1299 | if (copy_from_user(buf, user_buf, buf_size)) |
1300 | return -EFAULT; | |
1301 | ||
1302 | buf[buf_size - 1] = '\0'; | |
1303 | ||
1304 | new = psi_trigger_create(&psi_system, buf, nbytes, res); | |
1305 | if (IS_ERR(new)) | |
1306 | return PTR_ERR(new); | |
1307 | ||
1308 | seq = file->private_data; | |
1309 | /* Take seq->lock to protect seq->private from concurrent writes */ | |
1310 | mutex_lock(&seq->lock); | |
1311 | psi_trigger_replace(&seq->private, new); | |
1312 | mutex_unlock(&seq->lock); | |
1313 | ||
1314 | return nbytes; | |
1315 | } | |
1316 | ||
1317 | static ssize_t psi_io_write(struct file *file, const char __user *user_buf, | |
1318 | size_t nbytes, loff_t *ppos) | |
1319 | { | |
1320 | return psi_write(file, user_buf, nbytes, PSI_IO); | |
1321 | } | |
1322 | ||
1323 | static ssize_t psi_memory_write(struct file *file, const char __user *user_buf, | |
1324 | size_t nbytes, loff_t *ppos) | |
1325 | { | |
1326 | return psi_write(file, user_buf, nbytes, PSI_MEM); | |
1327 | } | |
1328 | ||
1329 | static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf, | |
1330 | size_t nbytes, loff_t *ppos) | |
1331 | { | |
1332 | return psi_write(file, user_buf, nbytes, PSI_CPU); | |
1333 | } | |
1334 | ||
1335 | static __poll_t psi_fop_poll(struct file *file, poll_table *wait) | |
1336 | { | |
1337 | struct seq_file *seq = file->private_data; | |
1338 | ||
1339 | return psi_trigger_poll(&seq->private, file, wait); | |
1340 | } | |
1341 | ||
1342 | static int psi_fop_release(struct inode *inode, struct file *file) | |
1343 | { | |
1344 | struct seq_file *seq = file->private_data; | |
1345 | ||
1346 | psi_trigger_replace(&seq->private, NULL); | |
1347 | return single_release(inode, file); | |
1348 | } | |
1349 | ||
97a32539 AD |
1350 | static const struct proc_ops psi_io_proc_ops = { |
1351 | .proc_open = psi_io_open, | |
1352 | .proc_read = seq_read, | |
1353 | .proc_lseek = seq_lseek, | |
1354 | .proc_write = psi_io_write, | |
1355 | .proc_poll = psi_fop_poll, | |
1356 | .proc_release = psi_fop_release, | |
eb414681 JW |
1357 | }; |
1358 | ||
97a32539 AD |
1359 | static const struct proc_ops psi_memory_proc_ops = { |
1360 | .proc_open = psi_memory_open, | |
1361 | .proc_read = seq_read, | |
1362 | .proc_lseek = seq_lseek, | |
1363 | .proc_write = psi_memory_write, | |
1364 | .proc_poll = psi_fop_poll, | |
1365 | .proc_release = psi_fop_release, | |
eb414681 JW |
1366 | }; |
1367 | ||
97a32539 AD |
1368 | static const struct proc_ops psi_cpu_proc_ops = { |
1369 | .proc_open = psi_cpu_open, | |
1370 | .proc_read = seq_read, | |
1371 | .proc_lseek = seq_lseek, | |
1372 | .proc_write = psi_cpu_write, | |
1373 | .proc_poll = psi_fop_poll, | |
1374 | .proc_release = psi_fop_release, | |
eb414681 JW |
1375 | }; |
1376 | ||
1377 | static int __init psi_proc_init(void) | |
1378 | { | |
3d817689 WL |
1379 | if (psi_enable) { |
1380 | proc_mkdir("pressure", NULL); | |
6db12ee0 JH |
1381 | proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops); |
1382 | proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops); | |
1383 | proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops); | |
3d817689 | 1384 | } |
eb414681 JW |
1385 | return 0; |
1386 | } | |
1387 | module_init(psi_proc_init); |