// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
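
/*
 * With SCHED_CAPACITY_SCALE = 1024, IOWAIT_BOOST_MIN evaluates to 128, i.e.
 * the first wakeup from IO boosts utilization by 1/8 of the maximum CPU
 * capacity.
 */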

struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;

	raw_spinlock_t		update_lock;
	u64			last_freq_update_time;
	s64			freq_update_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work		irq_work;
	struct kthread_work	work;
	struct mutex		work_lock;
	struct kthread_worker	worker;
	struct task_struct	*thread;
	bool			work_in_progress;

	bool			limits_changed;
	bool			need_freq_update;
};

struct sugov_cpu {
	struct update_util_data	update_util;
	struct sugov_policy	*sg_policy;
	unsigned int		cpu;

	bool			iowait_boost_pending;
	unsigned int		iowait_boost;
	u64			last_update;

	unsigned long		util;
	unsigned long		bw_dl;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, actually
	 * committing the new frequency may not, on fast switching
	 * platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on the slow switching platforms too to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}
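
/*
 * A worked example of the rate limit, assuming rate_limit_us has been set to
 * 2000 via sysfs: freq_update_delay_ns is then 2,000,000, so scheduler-driven
 * update requests arriving less than 2 ms after the last committed frequency
 * change are dropped here, unless the policy limits have changed in the
 * meantime.
 */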

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->need_freq_update)
		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	else if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	util = map_util_perf(util);
	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
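
/*
 * Worked example, assuming a frequency-invariant platform with
 * cpuinfo.max_freq = 2,000,000 kHz and max = SCHED_CAPACITY_SCALE = 1024:
 * for util = 512, map_util_perf() adds the 25% headroom (512 + 512/4 = 640)
 * and map_util_freq() yields 2,000,000 * 640 / 1024 = 1,250,000 kHz, which
 * cpufreq_driver_resolve_freq() then rounds to a frequency the driver
 * actually supports.
 */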

static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);

	sg_cpu->bw_dl = cpu_bw_dl(rq);
	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
					  FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
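
/*
 * For example, a task that wakes up from IO at least once per tick ramps its
 * CPU's boost through the sequence 128, 256, 512, 1024 (with the default
 * SCHED_CAPACITY_SCALE of 1024) over four such wakeups, at which point the
 * boost saturates at the utilization of the maximum OPP.
 */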

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @max_cap: the max CPU capacity
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset as well.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO, while
 * being more conservative about tasks that do only sporadic IO operations.
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned long max_cap)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
	boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
	if (sg_cpu->util < boost)
		sg_cpu->util = boost;
}
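
/*
 * Decay example: with no further IO wakeups, a boost of 1024 is halved on
 * each subsequent update (512, 256, 128) and then cleared once it would drop
 * below IOWAIT_BOOST_MIN. On an asymmetric system the boost is first rescaled
 * by max_cap, so a boost of 512 on a CPU with max_cap = 512 (assuming a
 * little CPU with half the capacity of the biggest one) contributes
 * (512 * 512) >> 10 = 256 to the utilization comparison.
 */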

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_cpu->sg_policy->limits_changed = true;
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
					      u64 time, unsigned long max_cap,
					      unsigned int flags)
{
	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
		return false;

	sugov_get_util(sg_cpu);
	sugov_iowait_apply(sg_cpu, time, max_cap);

	return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned long max_cap;
	unsigned int next_f;

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 *
	 * Except when the rq is capped by uclamp_max.
	 */
	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	if (!sugov_update_next_freq(sg_policy, time, next_f))
		return;

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	unsigned long prev_util = sg_cpu->util;
	unsigned long max_cap;

	/*
	 * Fall back to the "frequency" path if frequency invariance is not
	 * supported, because the direct mapping between the utilization and
	 * the performance levels depends on the frequency invariance.
	 */
	if (!arch_scale_freq_invariant()) {
		sugov_update_single_freq(hook, time, flags);
		return;
	}

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	/*
	 * Do not reduce the target performance level if the CPU has not been
	 * idle recently, as the reduction is likely to be premature then.
	 *
	 * Except when the rq is capped by uclamp_max.
	 */
	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
	    sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;

	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
				   map_util_perf(sg_cpu->util), max_cap);

	sg_cpu->sg_policy->last_freq_update_time = time;
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max_cap;
	unsigned int j;

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);

		sugov_get_util(j_sg_cpu);
		sugov_iowait_apply(j_sg_cpu, time, max_cap);

		util = max(j_sg_cpu->util, util);
	}

	return get_next_freq(sg_policy, util, max_cap);
}
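
/*
 * For instance, on a policy spanning four CPUs with (boosted) utilizations
 * {200, 700, 300, 100}, the shared frequency request is derived from the
 * maximum, 700: all CPUs in the policy run at the same frequency, so the
 * busiest one determines the request.
 */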

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (!sugov_update_next_freq(sg_policy, time, next_f))
			goto unlock;

		if (sg_policy->policy->fast_switch_enabled)
			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
		else
			sugov_deferred_update(sg_policy);
	}
unlock:
	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to
	 * false here, in which case we would miss queueing the new update.
	 *
	 * Note: if a work item was queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code;
	 * the request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);
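
/*
 * With per-policy tunables (have_governor_per_policy()), the attribute above
 * is typically exposed at, e.g.:
 *
 *   /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * so "echo 2000 > .../rate_limit_us" sets a 2 ms rate limit for that policy;
 * otherwise a single global instance appears under
 * /sys/devices/system/cpu/cpufreq/schedutil/.
 */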

static void sugov_tunables_free(struct kobject *kobj)
{
	struct gov_attr_set *attr_set = to_gov_attr_set(kobj);

	kfree(to_sugov_tunables(attr_set));
}

static const struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
	.release = &sugov_tunables_free,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline	= 10000000,
		.sched_period	= 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}
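
/*
 * The SCHED_DEADLINE attributes above (1 ms runtime per 10 ms period) are a
 * placeholder: SCHED_FLAG_SUGOV marks the thread as a special DL task whose
 * bandwidth is not actually enforced, while the DL policy itself lets the
 * frequency-change work preempt ordinary CFS and RT tasks promptly.
 */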

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_clear_global_tunables(void)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_clear_global_tunables();

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_clear_global_tunables();

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns	= sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq		= 0;
	sg_policy->work_in_progress	= false;
	sg_policy->limits_changed	= false;
	sg_policy->cached_raw_freq	= 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu		= cpu;
		sg_cpu->sg_policy	= sg_policy;
	}

	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
	.name			= "schedutil",
	.owner			= THIS_MODULE,
	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init			= sugov_init,
	.exit			= sugov_exit,
	.start			= sugov_start,
	.stop			= sugov_stop,
	.limits			= sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
	rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov)
{
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		/*
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		 */
		schedule_work(&rebuild_sd_work);
	}
}
#endif