/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);
/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling_rate was 1 second and the requested new rate is 10 ms
 * because the user wants an immediate reaction from the ondemand governor
 * (without knowing yet whether a higher frequency will be required), the
 * governor may pick up the new rate too late, up to 1 second later.  Thus,
 * when reducing the sampling rate, we need to make the new value effective
 * immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	int ret;

	ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
	if (ret != 1)
		return -EINVAL;

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);
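
/*
 * Illustrative sketch (not part of this file): a dbs governor exposes this
 * helper through its tunables table.  Assuming the gov_show_one_common()
 * and gov_attr_rw() helpers from cpufreq_governor.h, the wiring in a
 * governor like ondemand looks roughly like:
 *
 *	gov_show_one_common(sampling_rate);
 *	gov_attr_rw(sampling_rate);
 *
 * so that a write to the governor's "sampling_rate" sysfs attribute lands
 * in store_sampling_rate() above.
 */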

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
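
/*
 * Note: governors call this from their sysfs store() handlers when a
 * tunable that affects the stored baselines changes (e.g. io_is_busy or
 * ignore_nice_load), so that stale prev_* values don't distort the next
 * load computation.
 */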

unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is
	 * actually idle, so do not add the iowait time to the CPU idle time
	 * then.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get absolute load. */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between the
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely(time_elapsed > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU intensive that task actually
			 * was.  This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency.  However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: the governor's
			 * utilization update handler would not have run during
			 * CPU-idle periods.  Hence, an unusually large
			 * 'time_elapsed' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy().  In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics.  Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative.  That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}
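
		/*
		 * Worked example (illustrative numbers, not from this file):
		 * with time_elapsed = 20000 us and idle_time = 5000 us in the
		 * common branch above, load = 100 * (20000 - 5000) / 20000 = 75.
		 */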

		if (time_elapsed > 2 * sampling_rate) {
			unsigned int periods = time_elapsed / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}
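
		/*
		 * Example (illustrative numbers): with sampling_rate of
		 * 10000 us and time_elapsed of 45000 us, periods = 4, so
		 * idle_periods can drop to 4 here.  Governors may use this
		 * to reduce the frequency faster after long idle stretches.
		 */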

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
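
/*
 * Note: dbs_update() returns the busiest CPU's load as a percentage in the
 * 0..100 range.  A governor's gov_dbs_update() callback (for example,
 * od_dbs_update() in cpufreq_ondemand.c) maps that value to a target
 * frequency and returns the delay until the next sample.
 */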

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load and the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.  The smp_wmb() below pairs with the smp_rmb()
	 * in dbs_update_util_handler().
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

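/*
 * dbs_update_util_handler() below is invoked from scheduler paths with the
 * runqueue lock held, so it cannot schedule the work item directly.  It
 * queues policy_dbs->irq_work instead, and the irq_work handler here then
 * schedules the real work on the local CPU.
 */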
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.  This
	 * smp_rmb() pairs with the smp_wmb() in dbs_work_handler().
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

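	/* Wait for any in-flight dbs_update_util_handler() calls to finish. */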
	synchronize_sched();
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_policy_dbs_info;

	dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
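
/*
 * Lifecycle note (summary, not in the original file): the cpufreq core
 * calls these callbacks in init -> start -> limits -> stop -> exit order;
 * limits is also invoked whenever policy->min or policy->max change while
 * the governor is running.
 */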

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count) {
		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data);
		kfree(dbs_data);
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/*
		 * Make the first invocation of dbs_update() compute the load.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->update_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
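
/*
 * Usage sketch (assumptions, not part of this file): a concrete dbs
 * governor plugs the callbacks exported above into its struct dbs_governor,
 * roughly the way cpufreq_ondemand.c does.  The names below follow
 * cpufreq_governor.h; treat them as illustrative if your tree differs:
 *
 *	static struct dbs_governor od_dbs_gov = {
 *		.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
 *		.kobj_type = { .default_attrs = od_attributes },
 *		.gov_dbs_update = od_dbs_update,
 *		.alloc = od_alloc,
 *		.free = od_free,
 *		.init = od_init,
 *		.exit = od_exit,
 *		.start = od_start,
 *	};
 *
 * where CPUFREQ_DBS_GOVERNOR_INITIALIZER() wires cpufreq_dbs_governor_init()
 * and the other cpufreq_dbs_governor_*() entry points into the generic
 * struct cpufreq_governor.
 */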