// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
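/*
 * Note (added for clarity, not in the original source): with the default
 * SCHED_CAPACITY_SCALE of 1024, IOWAIT_BOOST_MIN evaluates to 128, i.e. the
 * IO-wait boost always starts at 1/8 of the CPU capacity scale.
 */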

struct sugov_tunables {
        struct gov_attr_set attr_set;
        unsigned int rate_limit_us;
};

struct sugov_policy {
        struct cpufreq_policy *policy;

        struct sugov_tunables *tunables;
        struct list_head tunables_hook;

        raw_spinlock_t update_lock;     /* For shared policies */
        u64 last_freq_update_time;
        s64 freq_update_delay_ns;
        unsigned int next_freq;
        unsigned int cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct irq_work irq_work;
        struct kthread_work work;
        struct mutex work_lock;
        struct kthread_worker worker;
        struct task_struct *thread;
        bool work_in_progress;

        bool limits_changed;
        bool need_freq_update;
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy *sg_policy;
        unsigned int cpu;

        bool iowait_boost_pending;
        unsigned int iowait_boost;
        u64 last_update;

        unsigned long bw_dl;
        unsigned long max;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, our
         * sugov_update_commit() call may not for the fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * For the slow switching platforms, the kthread is always scheduled on
         * the right set of CPUs and any CPU can find the next frequency and
         * schedule the kthread.
         */
        if (sg_policy->policy->fast_switch_enabled &&
            !cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->limits_changed)) {
                sg_policy->limits_changed = false;
                sg_policy->need_freq_update = true;
                return true;
        }

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
{
        if (sg_policy->next_freq == next_freq)
                return false;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                              unsigned int next_freq)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        int cpu;

        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        next_freq = cpufreq_driver_fast_switch(policy, next_freq);
        if (!next_freq)
                return;

        policy->cur = next_freq;

        if (trace_cpu_frequency_enabled()) {
                for_each_cpu(cpu, policy->cpus)
                        trace_cpu_frequency(next_freq, cpu);
        }
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
                                  unsigned int next_freq)
{
        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = map_util_freq(util, freq, max);

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}
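
/*
 * Illustrative example (added, not in the original source), assuming
 * frequency-invariant utilization, SCHED_CAPACITY_SCALE == 1024 and
 * cpuinfo.max_freq == 2000000 kHz: with util == 614 and max == 1024 the raw
 * frequency is 1.25 * 2000000 * 614 / 1024 ~= 1499023 kHz (about 1.5 GHz),
 * which cpufreq_driver_resolve_freq() then rounds up to a frequency the
 * driver actually supports.
 */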

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the irq utilization.
 *
 * The DL bandwidth number otoh is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
                                 unsigned long max, enum schedutil_type type,
                                 struct task_struct *p)
{
        unsigned long dl_util, util, irq;
        struct rq *rq = cpu_rq(cpu);

        if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
            type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
                return max;
        }

        /*
         * Early check to see if IRQ/steal time saturates the CPU, can be
         * because of inaccuracies in how we track these -- see
         * update_irq_load_avg().
         */
        irq = cpu_util_irq(rq);
        if (unlikely(irq >= max))
                return max;

        /*
         * Because the time spent on RT/DL tasks is visible as 'lost' time to
         * CFS tasks and we use the same metric to track the effective
         * utilization (PELT windows are synchronized) we can directly add them
         * to obtain the CPU's actual utilization.
         *
         * CFS and RT utilization can be boosted or capped, depending on
         * utilization clamp constraints requested by currently RUNNABLE
         * tasks.
         * When there are no CFS RUNNABLE tasks, clamps are released and
         * frequency will be gracefully reduced with the utilization decay.
         */
        util = util_cfs + cpu_util_rt(rq);
        if (type == FREQUENCY_UTIL)
                util = uclamp_util_with(rq, util, p);

        dl_util = cpu_util_dl(rq);

        /*
         * For frequency selection we do not make cpu_util_dl() a permanent
         * part of this sum because we want to use cpu_bw_dl() later on, but we
         * need to check if the CFS+RT+DL sum is saturated (i.e. no idle time)
         * such that we select f_max when there is no idle time.
         *
         * NOTE: numerical errors or stop class might cause us to not quite hit
         * saturation when we should -- something for later.
         */
        if (util + dl_util >= max)
                return max;

        /*
         * OTOH, for energy computation we need the estimated running time, so
         * include util_dl and ignore dl_bw.
         */
        if (type == ENERGY_UTIL)
                util += dl_util;

        /*
         * There is still idle time; further improve the number by using the
         * irq metric. Because IRQ/steal time is hidden from the task clock we
         * need to scale the task numbers:
         *
         *              max - irq
         *   U' = irq + --------- * U
         *                 max
         */
        util = scale_irq_capacity(util, irq, max);
        util += irq;

        /*
         * Bandwidth required by DEADLINE must always be granted while, for
         * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
         * to gracefully reduce the frequency when no tasks show up for longer
         * periods of time.
         *
         * Ideally we would like to set bw_dl as min/guaranteed freq and util +
         * bw_dl as requested freq. However, cpufreq is not yet ready for such
         * an interface. So, we only do the latter for now.
         */
        if (type == FREQUENCY_UTIL)
                util += cpu_bw_dl(rq);

        return min(max, util);
}
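
/*
 * Illustrative example (added, not in the original source): with max == 1024,
 * irq == 256 and a CFS+RT utilization U == 512, the scaling above gives
 * U' = 256 + (1024 - 256) / 1024 * 512 = 256 + 384 = 640, i.e. the task
 * utilization is squeezed into the capacity left over after IRQ time.
 */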

static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);
        unsigned long util = cpu_util_cfs(rq);
        unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);

        return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
{
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
{
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure boost doubles only one time at each request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
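
/*
 * Illustrative progression (added, not in the original source), assuming
 * SCHED_CAPACITY_SCALE == 1024: IO wakeups arriving less than a tick apart
 * raise iowait_boost through 128 -> 256 -> 512 -> 1024, while
 * sugov_iowait_apply() halves it again each time no new boost was requested.
 */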

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task which woke up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is decreased by this function instead,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO, while
 * being more conservative about tasks that do only sporadic IO operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
                                        unsigned long util, unsigned long max)
{
        unsigned long boost;

        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return util;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return util;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
                        return util;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * @util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
        boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
        return max(boost, util);
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_policy->limits_changed = true;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long util, max;
        unsigned int next_f;
        bool busy;

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (!sugov_should_update_freq(sg_policy, time))
                return;

        /* Limits may have changed, don't skip frequency update */
        busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);

        util = sugov_get_util(sg_cpu);
        max = sg_cpu->max;
        util = sugov_iowait_apply(sg_cpu, time, util, max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
        if (busy && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;

                /* Reset cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = 0;
        }

        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
                sugov_fast_switch(sg_policy, time, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy, time, next_f);
                raw_spin_unlock(&sg_policy->update_lock);
        }
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;

                j_util = sugov_get_util(j_sg_cpu);
                j_max = j_sg_cpu->max;
                j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

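                /*
                 * Descriptive note (added): keep the CPU with the highest
                 * util/max ratio; comparing j_util * max against j_max * util
                 * avoids doing a division for each CPU in the policy.
                 */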
                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
}

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (sg_policy->policy->fast_switch_enabled)
                        sugov_fast_switch(sg_policy, time, next_f);
                else
                        sugov_deferred_update(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
        unsigned int freq;
        unsigned long flags;

        /*
         * Hold sg_policy->update_lock briefly to handle the case where:
         * if sg_policy->next_freq is read here, and then updated by
         * sugov_deferred_update() just before work_in_progress is set to false
         * here, we may miss queueing the new update.
         *
         * Note: If a work item was queued after the update_lock is released,
         * sugov_work() will just be called again by the kthread_work code; and
         * the request will be processed before the sugov thread sleeps.
         */
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
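
/*
 * Usage note (added, not in the original source): with per-policy governor
 * tunables this attribute is typically exposed as
 * /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/rate_limit_us, e.g.
 *
 *   echo 2000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * sets freq_update_delay_ns to 2000 * NSEC_PER_USEC, i.e. at most one
 * frequency update every 2 ms for that policy.
 */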

static struct attribute *sugov_attrs[] = {
        &rate_limit_us.attr,
        NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
        .default_groups = sugov_groups,
        .sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_attr attr = {
                .size = sizeof(struct sched_attr),
                .sched_policy = SCHED_DEADLINE,
                .sched_flags = SCHED_FLAG_SUGOV,
                .sched_nice = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime = 1000000,
                .sched_deadline = 10000000,
                .sched_period = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;

        kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_tunables_free(tunables);

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq = 0;
        sg_policy->work_in_progress = false;
        sg_policy->limits_changed = false;
        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu = cpu;
                sg_cpu->sg_policy = sg_policy;
        }

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                                             policy_is_shared(policy) ?
                                                        sugov_update_shared :
                                                        sugov_update_single);
        }
        return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_rcu();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}

static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
        .name = "schedutil",
        .owner = THIS_MODULE,
        .dynamic_switching = true,
        .init = sugov_init,
        .exit = sugov_exit,
        .start = sugov_start,
        .stop = sugov_stop,
        .limits = sugov_limits,
};
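
/*
 * Usage note (added, not in the original source): once registered, the
 * governor can be selected per policy at run time, e.g.
 *
 *   echo schedutil > /sys/devices/system/cpu/cpufreq/policy0/scaling_governor
 */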

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
        return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);

#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;

static void rebuild_sd_workfn(struct work_struct *work)
{
        mutex_lock(&sched_energy_mutex);
        sched_energy_update = true;
        rebuild_sched_domains();
        sched_energy_update = false;
        mutex_unlock(&sched_energy_mutex);
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                                   struct cpufreq_governor *old_gov)
{
        if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
                /*
                 * When called from the cpufreq_register_driver() path, the
                 * cpu_hotplug_lock is already held, so use a work item to
                 * avoid nested locking in rebuild_sched_domains().
                 */
                schedule_work(&rebuild_sd_work);
        }
}
#endif
951#endif