// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL	(2 * TICK_NSEC / NSEC_PER_USEC)

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/*
 * sampling_rate_store - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, suppose the
 * original sampling_rate was 1 second and the requested new rate is 10 ms,
 * because the user wants an immediate reaction from the ondemand governor
 * without being sure whether a higher frequency will be required.  The
 * governor might then change the sampling rate too late, up to 1 second
 * later.  Thus, when reducing the sampling rate, the new value must be made
 * effective immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int sampling_interval;
	int ret;

	ret = sscanf(buf, "%u", &sampling_interval);
	if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
		return -EINVAL;

	dbs_data->sampling_rate = sampling_interval;

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(sampling_rate_store);
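
/*
 * Illustrative usage (the sysfs path shown assumes system-wide ondemand
 * tunables; with per-policy tunables the attribute lives under the policy
 * directory instead):
 *
 *	# echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * sets a 10 ms sampling interval, which takes effect immediately because
 * sampling_rate_store() zeroes the pending sample delay of each policy.
 */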

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is
	 * actually idle, so do not add the iowait time to the CPU idle time
	 * then.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		/*
		 * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
		 * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
		 * off, where idle_time is calculated as the difference between
		 * the time elapsed in jiffies and the "busy time" obtained from
		 * CPU statistics.  If a CPU is 100% busy, the time elapsed and
		 * the busy time should grow by the same amount in two
		 * consecutive samples, but in practice there could be a tiny
		 * difference, making the accumulated idle time decrease
		 * sometimes.  Hence, in this case, idle_time should be regarded
		 * as 0 in order to make further processing correct.
		 */
		if (cur_idle_time > j_cdbs->prev_cpu_idle)
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		else
			idle_time = 0;

		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between the
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely(idle_time > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU-intensive that task actually
			 * was.  This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency.  However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: an unusually large
			 * 'idle_time' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed > idle_time)
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			else
				load = 0;

			j_cdbs->prev_load = load;
		}

		if (unlikely(idle_time > 2 * sampling_rate)) {
			unsigned int periods = idle_time / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
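
/*
 * Worked example of the load computation in dbs_update() (illustrative
 * numbers): with time_elapsed = 10000 us and idle_time = 2500 us, the CPU
 * was busy for 7500 us, so load = 100 * (10000 - 2500) / 10000 = 75.  The
 * caller sees the maximum such load across all CPUs of the policy.
 */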

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}
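
/*
 * Descriptive note on the lockless handshake above: dbs_work_handler()
 * writes the new sample delay, issues smp_wmb() and only then clears
 * work_in_progress, while dbs_update_util_handler() checks
 * work_in_progress, issues smp_rmb() and only then reads last_sample_time
 * and sample_delay_ns.  The paired barriers keep the handler from acting
 * on a stale sample delay.
 */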

static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_rcu();
}
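
/*
 * Descriptive note: synchronize_rcu() in gov_clear_update_util() waits for
 * any dbs_update_util_handler() invocation still in flight on another CPU
 * to complete, so the caller can safely tear down the data it uses.
 */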

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

static void cpufreq_dbs_data_release(struct kobject *kobj)
{
	struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
	struct dbs_governor *gov = dbs_data->gov;

	gov->exit(dbs_data);
	kfree(dbs_data);
}

int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	dbs_data->gov = gov;
	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_dbs_data;

	/*
	 * The sampling interval should not be less than the transition latency
	 * of the CPU and it also cannot be too small for dbs_update() to work
	 * correctly.
	 */
	dbs_data->sampling_rate = max_t(unsigned int,
					CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
					cpufreq_policy_transition_delay_us(policy));

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	gov->kobj_type.release = cpufreq_dbs_data_release;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	kobject_put(&dbs_data->attr_set.kobj);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);

free_dbs_data:
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
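
/*
 * A minimal sketch of how a governor plugs into this framework (the my_*
 * names are hypothetical; see cpufreq_ondemand.c for a real user):
 *
 *	static struct dbs_governor my_gov = {
 *		.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("mygov"),
 *		.kobj_type = { .default_groups = my_groups },
 *		.gov_dbs_update = my_dbs_update,
 *		.alloc = my_alloc,
 *		.free = my_free,
 *		.init = my_init,
 *		.exit = my_exit,
 *		.start = my_start,
 *	};
 *
 * CPUFREQ_DBS_GOVERNOR_INITIALIZER() wires cpufreq_dbs_governor_init() and
 * the other callbacks in this file into the embedded struct cpufreq_governor.
 */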

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count && !have_governor_per_policy())
		gov->gdbs_data = NULL;

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/*
		 * Make the first invocation of dbs_update() compute the load.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
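
/*
 * Descriptive note: the teardown order in cpufreq_dbs_governor_stop()
 * matters.  Removing the update-util hooks first prevents new irq_work from
 * being queued, and the pending irq_work and work item are flushed before
 * the bookkeeping flags are reset.
 */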

void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs;

	/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
	mutex_lock(&gov_dbs_data_mutex);
	policy_dbs = policy->governor_data;
	if (!policy_dbs)
		goto out;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);
	mutex_unlock(&policy_dbs->update_mutex);

out:
	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);