cpufreq: governor: Drop the gov pointer from struct dbs_data
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright    (C) 2001 Russell King
 *              (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *              (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *              (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *              (C) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

static struct attribute_group *get_sysfs_attr(struct dbs_governor *gov)
{
        return have_governor_per_policy() ?
                gov->attr_group_gov_pol : gov->attr_group_gov_sys;
}

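/*
 * Note on the helper above: with have_governor_per_policy() (drivers that set
 * CPUFREQ_HAVE_GOVERNOR_PER_POLICY), each policy carries its own copy of the
 * tunables and the attribute group is created under the policy's kobject;
 * otherwise a single system-wide set of tunables is exposed, typically under
 * /sys/devices/system/cpu/cpufreq/<governor>/.
 */
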
void dbs_check_cpu(struct cpufreq_policy *policy, int cpu)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        unsigned int sampling_rate;
        unsigned int max_load = 0;
        unsigned int ignore_nice;
        unsigned int j;

        if (gov->governor == GOV_ONDEMAND) {
                struct od_cpu_dbs_info_s *od_dbs_info =
                                gov->get_cpu_dbs_info_s(cpu);

                /*
                 * Sometimes, the ondemand governor uses an additional
                 * multiplier to give long delays. So apply this multiplier to
                 * the 'sampling_rate', so as to keep the wake-up-from-idle
                 * detection logic a bit conservative.
                 */
                sampling_rate = od_tuners->sampling_rate;
                sampling_rate *= od_dbs_info->rate_mult;

                ignore_nice = od_tuners->ignore_nice_load;
        } else {
                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice_load;
        }

        /* Get absolute load. */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;
                int io_busy = 0;

                j_cdbs = gov->get_cpu_cdbs(j);

                /*
                 * For the purpose of ondemand, waiting for disk IO is
                 * an indication that you're performance critical, and
                 * not that the system is actually idle. So do not add
                 * the iowait time to the cpu idle time.
                 */
                if (gov->governor == GOV_ONDEMAND)
                        io_busy = od_tuners->io_is_busy;
                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

                wall_time = (unsigned int)
                        (cur_wall_time - j_cdbs->prev_cpu_wall);
                j_cdbs->prev_cpu_wall = cur_wall_time;

                if (cur_idle_time < j_cdbs->prev_cpu_idle)
                        cur_idle_time = j_cdbs->prev_cpu_idle;

                idle_time = (unsigned int)
                        (cur_idle_time - j_cdbs->prev_cpu_idle);
                j_cdbs->prev_cpu_idle = cur_idle_time;

                if (ignore_nice) {
                        u64 cur_nice;
                        unsigned long cur_nice_jiffies;

                        /*
                         * Use the per-CPU nice bookkeeping set up for CPU j
                         * in cpufreq_governor_start().
                         */
                        cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
                                   j_cdbs->prev_cpu_nice;
                        /*
                         * Assumption: nice time between sampling periods will
                         * be less than 2^32 jiffies for 32 bit sys
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        j_cdbs->prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                /*
                 * If the CPU had gone completely idle, and a task just woke up
                 * on this CPU now, it would be unfair to calculate 'load' the
                 * usual way for this elapsed time-window, because it will show
                 * near-zero load, irrespective of how CPU intensive that task
                 * actually is. This is undesirable for latency-sensitive bursty
                 * workloads.
                 *
                 * To avoid this, we reuse the 'load' from the previous
                 * time-window and give this task a chance to start with a
                 * reasonably high CPU frequency. (However, we shouldn't over-do
                 * this copy, lest we get stuck at a high load (high frequency)
                 * for too long, even when the current system load has actually
                 * dropped down. So we perform the copy only once, upon the
                 * first wake-up from idle.)
                 *
                 * Detecting this situation is easy: the governor's utilization
                 * update handler would not have run during CPU-idle periods.
                 * Hence, an unusually large 'wall_time' (as compared to the
                 * sampling rate) indicates this scenario.
                 *
                 * prev_load can be zero in two cases and we must recalculate it
                 * for both cases:
                 * - during long idle intervals
                 * - explicitly set to zero
                 */
                if (unlikely(wall_time > (2 * sampling_rate) &&
                             j_cdbs->prev_load)) {
                        load = j_cdbs->prev_load;

                        /*
                         * Perform a destructive copy, to ensure that we copy
                         * the previous load only once, upon the first wake-up
                         * from idle.
                         */
                        j_cdbs->prev_load = 0;
                } else {
                        load = 100 * (wall_time - idle_time) / wall_time;
                        j_cdbs->prev_load = load;
                }

                if (load > max_load)
                        max_load = load;
        }

        gov->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

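/*
 * Illustrative numbers for the load computation above: in a sampling window
 * with wall_time = 10000 us and idle_time = 2500 us,
 * load = 100 * (10000 - 2500) / 10000 = 75.  Also note that the governor
 * reacts to the highest load seen among policy->cpus (max_load), not the
 * average, so a single busy CPU is enough to raise the shared frequency.
 */
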
void gov_set_update_util(struct cpu_common_dbs_info *shared,
                         unsigned int delay_us)
{
        struct cpufreq_policy *policy = shared->policy;
        struct dbs_governor *gov = dbs_governor_of(policy);
        int cpu;

        gov_update_sample_delay(shared, delay_us);
        shared->last_sample_time = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

                cpufreq_set_update_util_data(cpu, &cdbs->update_util);
        }
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

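/*
 * From this point on, the scheduler invokes dbs_update_util_handler() (via
 * the update_util hook installed in cpufreq_governor_start()) on every
 * utilization update of the registered CPUs; sample_delay_ns then determines
 * how often those callbacks actually turn into a load evaluation.
 */
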
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
        int i;

        for_each_cpu(i, policy->cpus)
                cpufreq_set_update_util_data(i, NULL);

        synchronize_rcu();
}

static void gov_cancel_work(struct cpu_common_dbs_info *shared)
{
        /* Tell dbs_update_util_handler() to skip queuing up work items. */
        atomic_inc(&shared->skip_work);
        /*
         * If dbs_update_util_handler() is already running, it may not notice
         * the incremented skip_work, so wait for it to complete to prevent its
         * work item from being queued up after the cancel_work_sync() below.
         */
        gov_clear_update_util(shared->policy);
        irq_work_sync(&shared->irq_work);
        cancel_work_sync(&shared->work);
        atomic_set(&shared->skip_work, 0);
}

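/*
 * The teardown order above matters: the update_util hooks are removed first
 * (the synchronize_rcu() in gov_clear_update_util() guarantees no handler is
 * still executing afterwards), then any pending irq_work and work item are
 * flushed, so nothing is left that could re-queue them once skip_work is
 * reset to zero.
 */
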
static void dbs_work_handler(struct work_struct *work)
{
        struct cpu_common_dbs_info *shared = container_of(work,
                        struct cpu_common_dbs_info, work);
        struct cpufreq_policy *policy;
        struct dbs_governor *gov;
        unsigned int delay;

        policy = shared->policy;
        gov = dbs_governor_of(policy);

        /*
         * Make sure cpufreq_governor_limits() isn't evaluating load and the
         * ondemand governor isn't updating the sampling rate in parallel.
         */
        mutex_lock(&shared->timer_mutex);
        delay = gov->gov_dbs_timer(policy);
        shared->sample_delay_ns = jiffies_to_nsecs(delay);
        mutex_unlock(&shared->timer_mutex);

        /*
         * If the atomic operation below is reordered with respect to the
         * sample delay modification, the utilization update handler may end
         * up using a stale sample delay value.
         */
        smp_mb__before_atomic();
        atomic_dec(&shared->skip_work);
}

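/*
 * dbs_update_util_handler() is called from scheduler context, where sleeping
 * or taking mutexes is not allowed, so the actual frequency evaluation is
 * bounced through two layers: the handler raises an irq_work (dbs_irq_work()
 * below), which in turn schedules the work item serviced by
 * dbs_work_handler() above in process context, where taking timer_mutex and
 * calling into the cpufreq driver is safe.
 */
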
static void dbs_irq_work(struct irq_work *irq_work)
{
        struct cpu_common_dbs_info *shared;

        shared = container_of(irq_work, struct cpu_common_dbs_info, irq_work);
        schedule_work(&shared->work);
}

static inline void gov_queue_irq_work(struct cpu_common_dbs_info *shared)
{
#ifdef CONFIG_SMP
        irq_work_queue_on(&shared->irq_work, smp_processor_id());
#else
        irq_work_queue(&shared->irq_work);
#endif
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
                                    unsigned long util, unsigned long max)
{
        struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info,
                                                 update_util);
        struct cpu_common_dbs_info *shared = cdbs->shared;

        /*
         * The work may not be allowed to be queued up right now.
         * Possible reasons:
         * - Work has already been queued up or is in progress.
         * - The governor is being stopped.
         * - It is too early (too little time from the previous sample).
         */
        if (atomic_inc_return(&shared->skip_work) == 1) {
                u64 delta_ns;

                delta_ns = time - shared->last_sample_time;
                if ((s64)delta_ns >= shared->sample_delay_ns) {
                        shared->last_sample_time = time;
                        gov_queue_irq_work(shared);
                        return;
                }
        }
        atomic_dec(&shared->skip_work);
}

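/*
 * skip_work acts, roughly, as a small state machine: 0 means no sample is in
 * flight, 1 means a work item has been queued (or is briefly being decided
 * on above) and further updates back off, and values above 1 occur while
 * gov_cancel_work() holds the counter elevated to fence off new work.  Only
 * the 0 -> 1 transition (atomic_inc_return() == 1) may queue the irq_work.
 */
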
static void set_sampling_rate(struct dbs_data *dbs_data,
                              struct dbs_governor *gov,
                              unsigned int sampling_rate)
{
        if (gov->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

                cs_tuners->sampling_rate = sampling_rate;
        } else {
                struct od_dbs_tuners *od_tuners = dbs_data->tuners;

                od_tuners->sampling_rate = sampling_rate;
        }
}

static int alloc_common_dbs_info(struct cpufreq_policy *policy,
                                 struct dbs_governor *gov)
{
        struct cpu_common_dbs_info *shared;
        int j;

        /* Allocate memory for the common information for policy->cpus */
        shared = kzalloc(sizeof(*shared), GFP_KERNEL);
        if (!shared)
                return -ENOMEM;

        /* Set shared for all CPUs, online+offline */
        for_each_cpu(j, policy->related_cpus)
                gov->get_cpu_cdbs(j)->shared = shared;

        mutex_init(&shared->timer_mutex);
        atomic_set(&shared->skip_work, 0);
        init_irq_work(&shared->irq_work, dbs_irq_work);
        INIT_WORK(&shared->work, dbs_work_handler);
        return 0;
}

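/*
 * Note that shared is installed for policy->related_cpus, i.e. every CPU
 * that may ever belong to the policy, including currently offline ones, so
 * a CPU coming online later finds its cdbs already wired up.
 */
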
static void free_common_dbs_info(struct cpufreq_policy *policy,
                                 struct dbs_governor *gov)
{
        struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
        struct cpu_common_dbs_info *shared = cdbs->shared;
        int j;

        mutex_destroy(&shared->timer_mutex);

        for_each_cpu(j, policy->cpus)
                gov->get_cpu_cdbs(j)->shared = NULL;

        kfree(shared);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct dbs_data *dbs_data = gov->gdbs_data;
        unsigned int latency;
        int ret;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        if (dbs_data) {
                if (WARN_ON(have_governor_per_policy()))
                        return -EINVAL;

                ret = alloc_common_dbs_info(policy, gov);
                if (ret)
                        return ret;

                dbs_data->usage_count++;
                policy->governor_data = dbs_data;
                return 0;
        }

        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
        if (!dbs_data)
                return -ENOMEM;

        ret = alloc_common_dbs_info(policy, gov);
        if (ret)
                goto free_dbs_data;

        dbs_data->usage_count = 1;

        ret = gov->init(dbs_data, !policy->governor->initialized);
        if (ret)
                goto free_common_dbs_info;

        /* policy latency is in ns. Convert it to us first */
        latency = policy->cpuinfo.transition_latency / 1000;
        if (latency == 0)
                latency = 1;

        /* Bring kernel and HW constraints together */
        dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                                          MIN_LATENCY_MULTIPLIER * latency);
        set_sampling_rate(dbs_data, gov, max(dbs_data->min_sampling_rate,
                                             latency * LATENCY_MULTIPLIER));

        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;

        policy->governor_data = dbs_data;

        ret = sysfs_create_group(get_governor_parent_kobj(policy),
                                 get_sysfs_attr(gov));
        if (ret)
                goto reset_gdbs_data;

        return 0;

reset_gdbs_data:
        policy->governor_data = NULL;

        if (!have_governor_per_policy())
                gov->gdbs_data = NULL;
        gov->exit(dbs_data, !policy->governor->initialized);
free_common_dbs_info:
        free_common_dbs_info(policy, gov);
free_dbs_data:
        kfree(dbs_data);
        return ret;
}

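/*
 * Worked example for the sampling-rate setup above, assuming the usual
 * LATENCY_MULTIPLIER of 1000 from include/linux/cpufreq.h: a driver
 * reporting transition_latency = 10000 ns gives latency = 10 us, so the
 * default sampling rate becomes max(min_sampling_rate, 10 * 1000) =
 * 10000 us, i.e. the governor re-evaluates load at most every 10 ms.
 */
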
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct dbs_data *dbs_data = policy->governor_data;
        struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);

        /* State should be equivalent to INIT */
        if (!cdbs->shared || cdbs->shared->policy)
                return -EBUSY;

        if (!--dbs_data->usage_count) {
                sysfs_remove_group(get_governor_parent_kobj(policy),
                                   get_sysfs_attr(gov));

                policy->governor_data = NULL;

                if (!have_governor_per_policy())
                        gov->gdbs_data = NULL;

                gov->exit(dbs_data, policy->governor->initialized == 1);
                kfree(dbs_data);
        } else {
                policy->governor_data = NULL;
        }

        free_common_dbs_info(policy, gov);
        return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct dbs_data *dbs_data = policy->governor_data;
        unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
        struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);
        struct cpu_common_dbs_info *shared = cdbs->shared;
        int io_busy = 0;

        if (!policy->cur)
                return -EINVAL;

        /* State should be equivalent to INIT */
        if (!shared || shared->policy)
                return -EBUSY;

        if (gov->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice_load;
        } else {
                struct od_dbs_tuners *od_tuners = dbs_data->tuners;

                sampling_rate = od_tuners->sampling_rate;
                ignore_nice = od_tuners->ignore_nice_load;
                io_busy = od_tuners->io_is_busy;
        }

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
                unsigned int prev_load;

                j_cdbs->prev_cpu_idle =
                        get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

                prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
                                           j_cdbs->prev_cpu_idle);
                j_cdbs->prev_load = 100 * prev_load /
                                    (unsigned int)j_cdbs->prev_cpu_wall;

                if (ignore_nice)
                        j_cdbs->prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                j_cdbs->update_util.func = dbs_update_util_handler;
        }
        shared->policy = policy;

        if (gov->governor == GOV_CONSERVATIVE) {
                struct cs_cpu_dbs_info_s *cs_dbs_info =
                        gov->get_cpu_dbs_info_s(cpu);

                cs_dbs_info->down_skip = 0;
                cs_dbs_info->requested_freq = policy->cur;
        } else {
                struct od_ops *od_ops = gov->gov_ops;
                struct od_cpu_dbs_info_s *od_dbs_info =
                        gov->get_cpu_dbs_info_s(cpu);

                od_dbs_info->rate_mult = 1;
                od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
                od_ops->powersave_bias_init_cpu(cpu);
        }

        gov_set_update_util(shared, sampling_rate);
        return 0;
}

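/*
 * The prev_load seeding above uses the cumulative counters since boot
 * (prev_cpu_wall and prev_cpu_idle are running totals at this point, not
 * per-window deltas), so it is only a rough starting estimate; e.g.
 * wall = 150000 and idle = 120000 yield prev_load = 100 * 30000 / 150000
 * = 20.  The first real sampling window replaces it with a proper value.
 */
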
static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
        struct cpu_common_dbs_info *shared = cdbs->shared;

        /* State should be equivalent to START */
        if (!shared || !shared->policy)
                return -EBUSY;

        gov_cancel_work(shared);
        shared->policy = NULL;

        return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

        /* State should be equivalent to START */
        if (!cdbs->shared || !cdbs->shared->policy)
                return -EBUSY;

        mutex_lock(&cdbs->shared->timer_mutex);
        if (policy->max < cdbs->shared->policy->cur)
                __cpufreq_driver_target(cdbs->shared->policy, policy->max,
                                        CPUFREQ_RELATION_H);
        else if (policy->min > cdbs->shared->policy->cur)
                __cpufreq_driver_target(cdbs->shared->policy, policy->min,
                                        CPUFREQ_RELATION_L);
        dbs_check_cpu(policy, cpu);
        mutex_unlock(&cdbs->shared->timer_mutex);

        return 0;
}

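/*
 * After clamping the current frequency into [policy->min, policy->max],
 * cpufreq_governor_limits() re-runs dbs_check_cpu() under timer_mutex so the
 * next frequency decision reflects the new limits immediately, instead of
 * waiting for the next scheduled sample.
 */
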
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
        int ret = -EINVAL;

        /* Lock governor to block concurrent initialization of governor */
        mutex_lock(&dbs_data_mutex);

        if (event == CPUFREQ_GOV_POLICY_INIT) {
                ret = cpufreq_governor_init(policy);
        } else if (policy->governor_data) {
                switch (event) {
                case CPUFREQ_GOV_POLICY_EXIT:
                        ret = cpufreq_governor_exit(policy);
                        break;
                case CPUFREQ_GOV_START:
                        ret = cpufreq_governor_start(policy);
                        break;
                case CPUFREQ_GOV_STOP:
                        ret = cpufreq_governor_stop(policy);
                        break;
                case CPUFREQ_GOV_LIMITS:
                        ret = cpufreq_governor_limits(policy);
                        break;
                }
        }

        mutex_unlock(&dbs_data_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
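
/*
 * Usage sketch (illustrative, based on how ondemand and conservative wire
 * themselves up at this point in the tree; the field set is abbreviated):
 * a governor embeds struct cpufreq_governor inside its struct dbs_governor
 * and routes all callbacks here, e.g.:
 *
 *      static struct dbs_governor my_dbs_gov = {
 *              .gov = {
 *                      .name = "my_governor",
 *                      .governor = cpufreq_governor_dbs,
 *                      .max_transition_latency = TRANSITION_LATENCY_LIMIT,
 *                      .owner = THIS_MODULE,
 *              },
 *              ...
 *      };
 *
 * dbs_governor_of() then recovers the dbs_governor from policy->governor via
 * container_of(), which is why the .gov member must be embedded rather than
 * pointed to.
 */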