// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN        (SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
        struct gov_attr_set     attr_set;
        unsigned int            rate_limit_us;
};

struct sugov_policy {
        struct cpufreq_policy   *policy;

        struct sugov_tunables   *tunables;
        struct list_head        tunables_hook;

        raw_spinlock_t          update_lock;    /* For shared policies */
        u64                     last_freq_update_time;
        s64                     freq_update_delay_ns;
        unsigned int            next_freq;
        unsigned int            cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct irq_work         irq_work;
        struct kthread_work     work;
        struct mutex            work_lock;
        struct kthread_worker   worker;
        struct task_struct      *thread;
        bool                    work_in_progress;

        bool                    need_freq_update;
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy     *sg_policy;
        unsigned int            cpu;

        bool                    iowait_boost_pending;
        unsigned int            iowait_boost;
        u64                     last_update;

        unsigned long           bw_dl;
        unsigned long           max;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long           saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, our
         * sugov_update_commit() call may not for the fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * For the slow switching platforms, the kthread is always scheduled on
         * the right set of CPUs and any CPU can find the next frequency and
         * schedule the kthread.
         */
        if (sg_policy->policy->fast_switch_enabled &&
            !cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->need_freq_update))
                return true;

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
}

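/*
 * Worked example of the rate limit above (illustrative numbers only):
 * with rate_limit_us = 500, freq_update_delay_ns is 500 * NSEC_PER_USEC =
 * 500000 ns. An update arriving 200000 ns after last_freq_update_time gives
 * delta_ns = 200000 < 500000 and is dropped; one arriving 600000 ns later
 * goes through. need_freq_update bypasses the check entirely, e.g. after
 * sugov_limits() or when ignore_dl_rate_limit() notices DL bandwidth grew.
 */
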
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
{
        if (sg_policy->next_freq == next_freq)
                return false;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                              unsigned int next_freq)
{
        struct cpufreq_policy *policy = sg_policy->policy;

        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        next_freq = cpufreq_driver_fast_switch(policy, next_freq);
        if (!next_freq)
                return;

        policy->cur = next_freq;
        trace_cpu_frequency(next_freq, smp_processor_id());
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
                                  unsigned int next_freq)
{
        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = map_util_freq(util, freq, max);

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}

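/*
 * Worked example of the mapping above (illustrative numbers, assuming a
 * frequency-invariant platform): with util = 512, max = 1024 and
 * cpuinfo.max_freq = 2000000 kHz, map_util_freq() computes
 * 1.25 * 2000000 * 512 / 1024 = 1250000 kHz, which
 * cpufreq_driver_resolve_freq() then maps to the closest frequency the
 * driver can actually provide. At util / max = 0.8 the raw request already
 * equals max_freq, which is the tipping point implied by C = 1.25.
 */
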
/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs, rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs, rt and dl utilization are the running times measured with
 * rq->clock_task, which excludes things like IRQ and steal-time. The latter
 * are then accrued in the irq utilization.
 *
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed based on the task model parameters and gives the minimal
 * utilization required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
                                 unsigned long max, enum schedutil_type type,
                                 struct task_struct *p)
{
        unsigned long dl_util, util, irq;
        struct rq *rq = cpu_rq(cpu);

        if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
            type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
                return max;
        }

        /*
         * Early check to see if IRQ/steal time saturates the CPU; this can
         * happen because of inaccuracies in how we track these -- see
         * update_irq_load_avg().
         */
        irq = cpu_util_irq(rq);
        if (unlikely(irq >= max))
                return max;

        /*
         * Because the time spent on RT/DL tasks is visible as 'lost' time to
         * CFS tasks and we use the same metric to track the effective
         * utilization (PELT windows are synchronized) we can directly add them
         * to obtain the CPU's actual utilization.
         *
         * CFS and RT utilization can be boosted or capped, depending on
         * utilization clamp constraints requested by currently RUNNABLE
         * tasks.
         * When there are no CFS RUNNABLE tasks, clamps are released and
         * frequency will be gracefully reduced with the utilization decay.
         */
        util = util_cfs + cpu_util_rt(rq);
        if (type == FREQUENCY_UTIL)
                util = uclamp_util_with(rq, util, p);

        dl_util = cpu_util_dl(rq);

        /*
         * For frequency selection we do not make cpu_util_dl() a permanent part
         * of this sum because we want to use cpu_bw_dl() later on, but we need
         * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
         * that we select f_max when there is no idle time.
         *
         * NOTE: numerical errors or stop class might cause us to not quite hit
         * saturation when we should -- something for later.
         */
        if (util + dl_util >= max)
                return max;

        /*
         * OTOH, for energy computation we need the estimated running time, so
         * include util_dl and ignore dl_bw.
         */
        if (type == ENERGY_UTIL)
                util += dl_util;

        /*
         * There is still idle time; further improve the number by using the
         * irq metric. Because IRQ/steal time is hidden from the task clock we
         * need to scale the task numbers:
         *
         *              1 - irq
         *   U' = irq + ------- * U
         *                max
         */
        util = scale_irq_capacity(util, irq, max);
        util += irq;

        /*
         * Bandwidth required by DEADLINE must always be granted while, for
         * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
         * to gracefully reduce the frequency when no tasks show up for longer
         * periods of time.
         *
         * Ideally we would like to set bw_dl as min/guaranteed freq and util +
         * bw_dl as requested freq. However, cpufreq is not yet ready for such
         * an interface. So, we only do the latter for now.
         */
        if (type == FREQUENCY_UTIL)
                util += cpu_bw_dl(rq);

        return min(max, util);
}

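/*
 * Worked example for FREQUENCY_UTIL (illustrative numbers, max = 1024, no
 * uclamp constraints): with util_cfs = 300, cpu_util_rt() = 100,
 * cpu_util_dl() = 50, cpu_util_irq() = 128 and cpu_bw_dl() = 102:
 *
 *   util = 300 + 100 = 400                         (not saturated: 400 + 50 < 1024)
 *   util = 400 * (1024 - 128) / 1024 + 128 = 478   (IRQ scaling above)
 *   util = 478 + 102 = 580                         (DL bandwidth added at the end)
 *
 * so frequency is selected as if the CPU were ~57% utilized. With ENERGY_UTIL,
 * the measured dl_util (50) would be added instead of the cpu_bw_dl() value.
 */
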
static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);
        unsigned long util = cpu_util_cfs(rq);
        unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);

        return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
{
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
{
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure boost doubles only one time at each request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}

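/*
 * Illustrative progression, assuming SCHED_CAPACITY_SCALE == 1024 (so
 * IOWAIT_BOOST_MIN == 128): a task that keeps requesting the boost at least
 * once per tick ramps it through 128 -> 256 -> 512 -> 1024, i.e. up to the
 * full capacity scale, while a single sporadic IO wakeup only ever
 * contributes the minimum 128.
 */
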
/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task which woke up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset as well.
 *
 * This mechanism is designed to boost tasks that wait on IO frequently, while
 * being more conservative about tasks that do only sporadic IO operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
                                        unsigned long util, unsigned long max)
{
        unsigned long boost;

        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return util;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return util;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
                        return util;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * @util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
        boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
        return max(boost, util);
}

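/*
 * Illustrative numbers for the conversion above: on an asymmetric system a
 * small CPU might have max = 446 while iowait_boost is tracked on the
 * SCHED_CAPACITY_SCALE (1024) scale; a boost of 512 then becomes
 * (512 * 446) >> 10 = 223, i.e. half of that CPU's capacity, and the
 * function returns max(223, util).
 */
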
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_policy->need_freq_update = true;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long util, max;
        unsigned int next_f;
        bool busy;

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (!sugov_should_update_freq(sg_policy, time))
                return;

        busy = sugov_cpu_is_busy(sg_cpu);

        util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
        util = sugov_iowait_apply(sg_cpu, time, util, max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
        if (busy && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;

                /* Reset cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = 0;
        }

        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
                sugov_fast_switch(sg_policy, time, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy, time, next_f);
                raw_spin_unlock(&sg_policy->update_lock);
        }
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;

                j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
                j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
}

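/*
 * The loop above picks the CPU with the highest util/max ratio without
 * dividing: "j_util * max > j_max * util" compares j_util/j_max against
 * util/max by cross-multiplication. For example (illustrative numbers), a
 * big CPU with j_util = 400, j_max = 1024 (~39%) loses to a little CPU
 * with j_util = 300, j_max = 446 (~67%), so the shared policy's frequency
 * ends up being driven by the more heavily loaded little CPU.
 */
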
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (sg_policy->policy->fast_switch_enabled)
                        sugov_fast_switch(sg_policy, time, next_f);
                else
                        sugov_deferred_update(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
        unsigned int freq;
        unsigned long flags;

        /*
         * Hold sg_policy->update_lock briefly to handle the case where
         * sg_policy->next_freq is read here and then updated by
         * sugov_deferred_update() just before work_in_progress is set to false
         * here; without the lock we could miss queueing the new update.
         *
         * Note: If a work item was queued after the update_lock is released,
         * sugov_work() will just be called again by the kthread_work code; the
         * request will be processed before the sugov thread sleeps.
         */
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
        &rate_limit_us.attr,
        NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
        .default_groups = sugov_groups,
        .sysfs_ops = &governor_sysfs_ops,
};

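/*
 * The only tunable exposed through this interface is rate_limit_us. With
 * per-policy governors it typically appears as (exact path may vary)
 * /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/rate_limit_us; a
 * write is handled by rate_limit_us_store() above, which immediately
 * updates freq_update_delay_ns for every policy attached to the attr_set.
 */
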
/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_attr attr = {
                .size           = sizeof(struct sched_attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_flags    = SCHED_FLAG_SUGOV,
                .sched_nice     = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime  = 1000000,
                .sched_deadline = 10000000,
                .sched_period   = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;

        kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_tunables_free(tunables);

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq            = 0;
        sg_policy->work_in_progress     = false;
        sg_policy->need_freq_update     = false;
        sg_policy->cached_raw_freq      = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu             = cpu;
                sg_cpu->sg_policy       = sg_policy;
        }

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                                             policy_is_shared(policy) ?
                                                        sugov_update_shared :
                                                        sugov_update_single);
        }
        return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_rcu();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}

static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->need_freq_update = true;
}

struct cpufreq_governor schedutil_gov = {
        .name                   = "schedutil",
        .owner                  = THIS_MODULE,
        .dynamic_switching      = true,
        .init                   = sugov_init,
        .exit                   = sugov_exit,
        .start                  = sugov_start,
        .stop                   = sugov_stop,
        .limits                 = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
        return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);

#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;

static void rebuild_sd_workfn(struct work_struct *work)
{
        mutex_lock(&sched_energy_mutex);
        sched_energy_update = true;
        rebuild_sched_domains();
        sched_energy_update = false;
        mutex_unlock(&sched_energy_mutex);
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                                   struct cpufreq_governor *old_gov)
{
        if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
                /*
                 * When called from the cpufreq_register_driver() path, the
                 * cpu_hotplug_lock is already held, so use a work item to
                 * avoid nested locking in rebuild_sched_domains().
                 */
                schedule_work(&rebuild_sd_work);
        }

}
#endif