cpufreq: Don't allow updating inactive policies from sysfs
[linux-block.git] / drivers/cpufreq/cpufreq.c
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *           (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
	return active == !policy_is_inactive(policy);
}

/* Finds the next active/inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
					  bool active)
{
	do {
		policy = list_next_entry(policy, policy_list);

		/* No more policies in the list */
		if (&policy->policy_list == &cpufreq_policy_list)
			return NULL;
	} while (!suitable_policy(policy, active));

	return policy;
}

static struct cpufreq_policy *first_policy(bool active)
{
	struct cpufreq_policy *policy;

	/* No policies in the list */
	if (list_empty(&cpufreq_policy_list))
		return NULL;

	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
				  policy_list);

	if (!suitable_policy(policy, active))
		policy = next_policy(policy, active);

	return policy;
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)	\
	for (__policy = first_policy(__active);		\
	     __policy;					\
	     __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

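/*
 * Illustrative usage sketch (not in the original file): walking every
 * active policy with the iterators defined above might look like:
 *
 *	struct cpufreq_policy *policy;
 *
 *	for_each_active_policy(policy)
 *		pr_debug("CPU%u: %u kHz\n", policy->cpu, policy->cur);
 *
 * Callers are expected to guard against concurrent updates of
 * cpufreq_policy_list while iterating.
 */
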
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that the cpufreq driver module doesn't unload during
 * critical sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
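
/*
 * Illustrative (not in the original file): governors typically sample this
 * interface twice and derive a load estimate from the deltas, roughly:
 *
 *	u64 wall_prev, wall_now, idle_prev, idle_now, busy;
 *
 *	idle_prev = get_cpu_idle_time(cpu, &wall_prev, 0);
 *	(one sampling period passes)
 *	idle_now = get_cpu_idle_time(cpu, &wall_now, 0);
 *	busy = (wall_now - wall_prev) - (idle_now - idle_prev);
 *	load = 100 * busy / (wall_now - wall_prev);
 *
 * Both the idle and wall values are in microseconds.
 */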

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
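
/*
 * Illustrative (not in the original file): a driver whose CPUs all share a
 * single clock could call this helper from its ->init() callback; the names
 * below are hypothetical and the latency argument is in nanoseconds:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, foo_freq_table, 100000);
 *	}
 */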

/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if none exists. It also
 * increments the kobject reference count to mark the policy busy, so a
 * corresponding call to cpufreq_cpu_put() is required to decrement it again.
 * If that call isn't made, the policy is never freed, as freeing depends on
 * the kobj count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
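
/*
 * Illustrative (not in the original file): the canonical pairing of the two
 * helpers above is
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		(read-only use of the policy, e.g. policy->cur)
 *		cpufreq_cpu_put(policy);
 *	}
 *
 * with the policy kobject and the driver module both pinned in between.
 */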

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
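
/*
 * Illustrative arithmetic (not in the original file): cpufreq_scale(old,
 * div, mult) computes old * mult / div. So with a reference of 1000000
 * loops_per_jiffy captured at 500000 kHz, a switch to 1000000 kHz gives
 * cpufreq_scale(1000000, 500000, 1000000) == 2000000 loops_per_jiffy.
 */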

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
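
/*
 * Illustrative (not in the original file): a synchronous driver's
 * ->target_index() callback would typically bracket the actual hardware
 * switch with the two helpers above (the foo_* names are hypothetical):
 *
 *	struct cpufreq_freqs freqs = {
 *		.old = policy->cur,
 *		.new = foo_freq_table[index].frequency,
 *	};
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = foo_set_pll(freqs.new);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */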

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
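
/*
 * Illustrative expansion (not in the original file): each show_one()
 * invocation above generates a trivial reader, e.g.
 * show_one(scaling_max_freq, max) becomes:
 *
 *	static ssize_t show_scaling_max_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 */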

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy))) {
		ret = -EBUSY;
		goto unlock_policy_rwsem;
	}

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

unlock_policy_rwsem:
	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}

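/*
 * Illustrative (not in the original file): with the inactive-policy check
 * above, a write such as
 *
 *	echo 1200000 > /sys/devices/system/cpu/cpuN/cpufreq/scaling_max_freq
 *
 * fails with -EBUSY if it races with CPU hotplug and policy->cpus has
 * meanwhile gone empty, instead of updating a policy that no CPU is using.
 */
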
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->kobj_cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				gov->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (likely(policy)) {
		/* Policy should be inactive here */
		WARN_ON(!policy_is_inactive(policy));
	}

	return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
			     struct device *cpu_dev)
{
	int ret;

	if (WARN_ON(cpu == policy->cpu))
		return 0;

	/* Move kobject to the new policy->cpu */
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
		return ret;
	}

	down_write(&policy->rwsem);
	policy->cpu = cpu;
	policy->kobj_cpu = cpu;
	up_write(&policy->rwsem);

	return 0;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
	bool recover_policy = cpufreq_suspended;

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy && !policy_is_inactive(policy)) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
		up_read(&cpufreq_rwsem);
		return ret;
	}

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		recover_policy = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (recover_policy && cpu != policy->cpu) {
		WARN_ON(update_policy_cpu(policy, cpu, dev));
	} else {
		policy->cpu = cpu;
		policy->kobj_cpu = cpu;
	}

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	down_write(&policy->rwsem);

	/* related cpus should at least contain policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!recover_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		/* prepare interface data */
		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
					   &dev->kobj, "cpufreq");
		if (ret) {
			pr_err("%s: failed to init policy->kobj: %d\n",
			       __func__, ret);
			goto err_init_policy_kobj;
		}

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		for_each_cpu(j, policy->related_cpus)
			per_cpu(cpufreq_cpu_data, j) = policy;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table known to the cpufreq core. In such cases the
	 * CPU might be unstable if it has to run on that frequency for a
	 * long duration, so it's better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (!recover_policy) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	cpufreq_init_policy(policy);

	if (!recover_policy) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	up_read(&cpufreq_rwsem);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
err_get_freq:
	if (!recover_policy) {
		kobject_put(&policy->kobj);
		wait_for_completion(&policy->kobj_unregister);
	}
err_init_policy_kobj:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (recover_policy)
		cpufreq_policy_put_kobj(policy);
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}

static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (has_target() && cpus == 1)
		strncpy(policy->last_governor, policy->governor->name,
			CPUFREQ_NAME_LEN);
	up_write(&policy->rwsem);

	if (cpu != policy->kobj_cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Nominate new CPU */
		int new_cpu = cpumask_any_but(policy->cpus, cpu);
		struct device *cpu_dev = get_cpu_device(new_cpu);

		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
		if (ret) {
			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					      "cpufreq"))
				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
				       __func__, cpu_dev->id);
			return ret;
		}

		if (!cpufreq_suspended)
			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				 __func__, new_cpu, cpu);
	} else if (cpufreq_driver->stop_cpu) {
		cpufreq_driver->stop_cpu(policy);
	}

	return 0;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (policy_is_inactive(policy)) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!cpufreq_suspended)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		if (!cpufreq_suspended)
			cpufreq_policy_free(policy);
	} else if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;

	if (cpu_is_offline(cpu))
		return 0;

	ret = __cpufreq_remove_dev_prepare(dev, sif);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif);

	return ret;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
 *	in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy)))
		return ret_freq;

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current (static) frequency of the CPU.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_err("%s: suspend_freq can't be zero\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
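
/*
 * Illustrative (not in the original file): a driver opts into this helper by
 * choosing a suspend frequency in its ->init() and wiring up the callback,
 * e.g. (foo_driver is hypothetical):
 *
 *	policy->suspend_freq = policy->max;
 *
 *	static struct cpufreq_driver foo_driver = {
 *		...
 *		.suspend = cpufreq_generic_suspend,
 *	};
 */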

/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle: some of the devices (e.g. i2c, regulators) they use for changing
 * frequency are suspended soon afterwards.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target())
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
			pr_err("%s: Failed to stop governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->suspend
		    && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}

1da177e4 1727/**
2f0aea93 1728 * cpufreq_resume() - Resume CPUFreq governors
1da177e4 1729 *
2f0aea93
VK
 1730 * Called during system-wide suspend/hibernate cycles to resume governors that
 1731 * were suspended by cpufreq_suspend().
1da177e4 1732 */
2f0aea93 1733void cpufreq_resume(void)
1da177e4 1734{
3a3e9e06 1735 struct cpufreq_policy *policy;
1da177e4 1736
2f0aea93
VK
1737 if (!cpufreq_driver)
1738 return;
1da177e4 1739
8e30444e
LT
1740 cpufreq_suspended = false;
1741
2f0aea93 1742 if (!has_target())
e00e56df 1743 return;
1da177e4 1744
2f0aea93 1745 pr_debug("%s: Resuming Governors\n", __func__);
1da177e4 1746
f963735a 1747 for_each_active_policy(policy) {
0c5aa405
VK
1748 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1749 pr_err("%s: Failed to resume driver: %p\n", __func__,
1750 policy);
1751 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
2f0aea93
VK
1752 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1753 pr_err("%s: Failed to start governor for policy: %p\n",
1754 __func__, policy);
2f0aea93 1755 }
c75de0ac
VK
1756
1757 /*
 1758 * Schedule a call to cpufreq_update_policy() for the first online CPU,
 1759 * as that one won't be hotplugged out on suspend. It will verify that
 1760 * the current frequency is in sync with what we believe it to be.
1761 */
1762 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1763 if (WARN_ON(!policy))
1764 return;
1765
1766 schedule_work(&policy->update);
2f0aea93 1767}
1da177e4 1768
9d95046e
BP
1769/**
1770 * cpufreq_get_current_driver - return current driver's name
1771 *
1772 * Return the name string of the currently loaded cpufreq driver
1773 * or NULL, if none.
1774 */
1775const char *cpufreq_get_current_driver(void)
1776{
1c3d85dd
RW
1777 if (cpufreq_driver)
1778 return cpufreq_driver->name;
1779
1780 return NULL;
9d95046e
BP
1781}
1782EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4 1783
51315cdf
TP
1784/**
1785 * cpufreq_get_driver_data - return current driver data
1786 *
1787 * Return the private data of the currently loaded cpufreq
1788 * driver, or NULL if no cpufreq driver is loaded.
1789 */
1790void *cpufreq_get_driver_data(void)
1791{
1792 if (cpufreq_driver)
1793 return cpufreq_driver->driver_data;
1794
1795 return NULL;
1796}
1797EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
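/*
 * Editorial sketch (not part of the original file): querying the
 * loaded driver. Both helpers return NULL when no driver is
 * registered, so callers must check.
 */
static void example_show_driver(void)
{
	const char *name = cpufreq_get_current_driver();

	pr_info("active cpufreq driver: %s\n", name ? name : "none");
	if (cpufreq_get_driver_data())
		pr_info("driver provides private data\n");
}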
1798
1da177e4
LT
1799/*********************************************************************
1800 * NOTIFIER LISTS INTERFACE *
1801 *********************************************************************/
1802
1803/**
 1804 * cpufreq_register_notifier - register a notifier with cpufreq
1805 * @nb: notifier function to register
1806 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1807 *
32ee8c3e 1808 * Add a notifier to one of two lists: either a list of notifiers that
1da177e4
LT
1809 * are notified about clock rate changes (once before and once after
 1810 * the transition), or a list of notifiers that are notified about
1811 * changes in cpufreq policy.
1812 *
1813 * This function may sleep, and has the same return conditions as
e041c683 1814 * blocking_notifier_chain_register.
1da177e4
LT
1815 */
1816int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1817{
1818 int ret;
1819
d5aaffa9
DB
1820 if (cpufreq_disabled())
1821 return -EINVAL;
1822
74212ca4
CEB
1823 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1824
1da177e4
LT
1825 switch (list) {
1826 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1827 ret = srcu_notifier_chain_register(
e041c683 1828 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1829 break;
1830 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1831 ret = blocking_notifier_chain_register(
1832 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1833 break;
1834 default:
1835 ret = -EINVAL;
1836 }
1da177e4
LT
1837
1838 return ret;
1839}
1840EXPORT_SYMBOL(cpufreq_register_notifier);
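/*
 * Editorial sketch (not part of the original file): a minimal
 * transition notifier that logs completed frequency switches.
 * Registration may sleep; pair it with cpufreq_unregister_notifier()
 * on teardown. "example_" names are hypothetical.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_info("cpu%u: %u -> %u kHz\n", freqs->cpu,
			freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_transition_cb,
};

/* cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER); */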
1841
1da177e4
LT
1842/**
 1843 * cpufreq_unregister_notifier - unregister a notifier with cpufreq
1844 * @nb: notifier block to be unregistered
bb176f7d 1845 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1846 *
 1847 * Remove a notifier from one of the cpufreq notifier lists.
1848 *
1849 * This function may sleep, and has the same return conditions as
e041c683 1850 * blocking_notifier_chain_unregister.
1da177e4
LT
1851 */
1852int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1853{
1854 int ret;
1855
d5aaffa9
DB
1856 if (cpufreq_disabled())
1857 return -EINVAL;
1858
1da177e4
LT
1859 switch (list) {
1860 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1861 ret = srcu_notifier_chain_unregister(
e041c683 1862 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1863 break;
1864 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1865 ret = blocking_notifier_chain_unregister(
1866 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1867 break;
1868 default:
1869 ret = -EINVAL;
1870 }
1da177e4
LT
1871
1872 return ret;
1873}
1874EXPORT_SYMBOL(cpufreq_unregister_notifier);
1875
1876
1877/*********************************************************************
1878 * GOVERNORS *
1879 *********************************************************************/
1880
1c03a2d0
VK
1881/* Must set freqs->new to intermediate frequency */
1882static int __target_intermediate(struct cpufreq_policy *policy,
1883 struct cpufreq_freqs *freqs, int index)
1884{
1885 int ret;
1886
1887 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1888
1889 /* We don't need to switch to intermediate freq */
1890 if (!freqs->new)
1891 return 0;
1892
1893 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1894 __func__, policy->cpu, freqs->old, freqs->new);
1895
1896 cpufreq_freq_transition_begin(policy, freqs);
1897 ret = cpufreq_driver->target_intermediate(policy, index);
1898 cpufreq_freq_transition_end(policy, freqs, ret);
1899
1900 if (ret)
1901 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1902 __func__, ret);
1903
1904 return ret;
1905}
1906
8d65775d
VK
1907static int __target_index(struct cpufreq_policy *policy,
1908 struct cpufreq_frequency_table *freq_table, int index)
1909{
1c03a2d0
VK
1910 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1911 unsigned int intermediate_freq = 0;
8d65775d
VK
1912 int retval = -EINVAL;
1913 bool notify;
1914
1915 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
8d65775d 1916 if (notify) {
1c03a2d0
VK
1917 /* Handle switching to intermediate frequency */
1918 if (cpufreq_driver->get_intermediate) {
1919 retval = __target_intermediate(policy, &freqs, index);
1920 if (retval)
1921 return retval;
1922
1923 intermediate_freq = freqs.new;
1924 /* Set old freq to intermediate */
1925 if (intermediate_freq)
1926 freqs.old = freqs.new;
1927 }
8d65775d 1928
1c03a2d0 1929 freqs.new = freq_table[index].frequency;
8d65775d
VK
1930 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1931 __func__, policy->cpu, freqs.old, freqs.new);
1932
1933 cpufreq_freq_transition_begin(policy, &freqs);
1934 }
1935
1936 retval = cpufreq_driver->target_index(policy, index);
1937 if (retval)
1938 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1939 retval);
1940
1c03a2d0 1941 if (notify) {
8d65775d
VK
1942 cpufreq_freq_transition_end(policy, &freqs, retval);
1943
1c03a2d0
VK
1944 /*
 1945 * Failed after setting to intermediate freq? The driver should have
 1946 * reverted to the initial frequency and so should we. Check
1947 * here for intermediate_freq instead of get_intermediate, in
58405af6 1948 * case we haven't switched to intermediate freq at all.
1c03a2d0
VK
1949 */
1950 if (unlikely(retval && intermediate_freq)) {
1951 freqs.old = intermediate_freq;
1952 freqs.new = policy->restore_freq;
1953 cpufreq_freq_transition_begin(policy, &freqs);
1954 cpufreq_freq_transition_end(policy, &freqs, 0);
1955 }
1956 }
1957
8d65775d
VK
1958 return retval;
1959}
1960
1da177e4
LT
1961int __cpufreq_driver_target(struct cpufreq_policy *policy,
1962 unsigned int target_freq,
1963 unsigned int relation)
1964{
7249924e 1965 unsigned int old_target_freq = target_freq;
8d65775d 1966 int retval = -EINVAL;
c32b6b8e 1967
a7b422cd
KRW
1968 if (cpufreq_disabled())
1969 return -ENODEV;
1970
7249924e
VK
1971 /* Make sure that target_freq is within supported range */
1972 if (target_freq > policy->max)
1973 target_freq = policy->max;
1974 if (target_freq < policy->min)
1975 target_freq = policy->min;
1976
1977 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
e837f9b5 1978 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228 1979
9c0ebcf7
VK
1980 /*
 1981 * This might look like a redundant call, as we check it again
 1982 * after finding the index. But it is left in intentionally for
 1983 * cases where exactly the same frequency is requested again, so
 1984 * that we can save a few function calls.
1985 */
5a1c0228
VK
1986 if (target_freq == policy->cur)
1987 return 0;
1988
1c03a2d0
VK
1989 /* Save last value to restore later on errors */
1990 policy->restore_freq = policy->cur;
1991
1c3d85dd
RW
1992 if (cpufreq_driver->target)
1993 retval = cpufreq_driver->target(policy, target_freq, relation);
9c0ebcf7
VK
1994 else if (cpufreq_driver->target_index) {
1995 struct cpufreq_frequency_table *freq_table;
1996 int index;
90d45d17 1997
9c0ebcf7
VK
1998 freq_table = cpufreq_frequency_get_table(policy->cpu);
1999 if (unlikely(!freq_table)) {
2000 pr_err("%s: Unable to find freq_table\n", __func__);
2001 goto out;
2002 }
2003
2004 retval = cpufreq_frequency_table_target(policy, freq_table,
2005 target_freq, relation, &index);
2006 if (unlikely(retval)) {
2007 pr_err("%s: Unable to find matching freq\n", __func__);
2008 goto out;
2009 }
2010
d4019f0a 2011 if (freq_table[index].frequency == policy->cur) {
9c0ebcf7 2012 retval = 0;
d4019f0a
VK
2013 goto out;
2014 }
2015
8d65775d 2016 retval = __target_index(policy, freq_table, index);
9c0ebcf7
VK
2017 }
2018
2019out:
1da177e4
LT
2020 return retval;
2021}
2022EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2023
1da177e4
LT
2024int cpufreq_driver_target(struct cpufreq_policy *policy,
2025 unsigned int target_freq,
2026 unsigned int relation)
2027{
f1829e4a 2028 int ret = -EINVAL;
1da177e4 2029
ad7722da 2030 down_write(&policy->rwsem);
1da177e4
LT
2031
2032 ret = __cpufreq_driver_target(policy, target_freq, relation);
2033
ad7722da 2034 up_write(&policy->rwsem);
1da177e4 2035
1da177e4
LT
2036 return ret;
2037}
2038EXPORT_SYMBOL_GPL(cpufreq_driver_target);
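/*
 * Editorial sketch (not part of the original file): requesting 1 GHz
 * or the closest supported frequency at or above it. The wrapper takes
 * policy->rwsem itself, so the caller must not already hold it.
 */
static int example_set_1ghz(struct cpufreq_policy *policy)
{
	return cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_L);
}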
2039
e08f5f5b
GS
2040static int __cpufreq_governor(struct cpufreq_policy *policy,
2041 unsigned int event)
1da177e4 2042{
cc993cab 2043 int ret;
6afde10c
TR
2044
 2045 /* Must only be defined when the default governor is known to have
 2046 latency restrictions, e.g. conservative or ondemand.
 2047 That this is the case is already ensured by Kconfig.
2048 */
2049#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2050 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2051#else
2052 struct cpufreq_governor *gov = NULL;
2053#endif
1c256245 2054
2f0aea93
VK
2055 /* Don't start any governor operations if we are entering suspend */
2056 if (cpufreq_suspended)
2057 return 0;
cb57720b
EZ
2058 /*
 2059 * The governor might not be initialized here if an ACPI _PPC change
 2060 * notification occurred, so check for it.
2061 */
2062 if (!policy->governor)
2063 return -EINVAL;
2f0aea93 2064
1c256245
TR
2065 if (policy->governor->max_transition_latency &&
2066 policy->cpuinfo.transition_latency >
2067 policy->governor->max_transition_latency) {
6afde10c
TR
2068 if (!gov)
2069 return -EINVAL;
2070 else {
e837f9b5
JP
2071 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2072 policy->governor->name, gov->name);
6afde10c
TR
2073 policy->governor = gov;
2074 }
1c256245 2075 }
1da177e4 2076
fe492f3f
VK
2077 if (event == CPUFREQ_GOV_POLICY_INIT)
2078 if (!try_module_get(policy->governor->owner))
2079 return -EINVAL;
1da177e4 2080
2d06d8c4 2081 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e837f9b5 2082 policy->cpu, event);
95731ebb
XC
2083
2084 mutex_lock(&cpufreq_governor_lock);
56d07db2 2085 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
f73d3933
VK
2086 || (!policy->governor_enabled
2087 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
95731ebb
XC
2088 mutex_unlock(&cpufreq_governor_lock);
2089 return -EBUSY;
2090 }
2091
2092 if (event == CPUFREQ_GOV_STOP)
2093 policy->governor_enabled = false;
2094 else if (event == CPUFREQ_GOV_START)
2095 policy->governor_enabled = true;
2096
2097 mutex_unlock(&cpufreq_governor_lock);
2098
1da177e4
LT
2099 ret = policy->governor->governor(policy, event);
2100
4d5dcc42
VK
2101 if (!ret) {
2102 if (event == CPUFREQ_GOV_POLICY_INIT)
2103 policy->governor->initialized++;
2104 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2105 policy->governor->initialized--;
95731ebb
XC
2106 } else {
2107 /* Restore original values */
2108 mutex_lock(&cpufreq_governor_lock);
2109 if (event == CPUFREQ_GOV_STOP)
2110 policy->governor_enabled = true;
2111 else if (event == CPUFREQ_GOV_START)
2112 policy->governor_enabled = false;
2113 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 2114 }
b394058f 2115
fe492f3f
VK
2116 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2117 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1da177e4
LT
2118 module_put(policy->governor->owner);
2119
2120 return ret;
2121}
2122
1da177e4
LT
2123int cpufreq_register_governor(struct cpufreq_governor *governor)
2124{
3bcb09a3 2125 int err;
1da177e4
LT
2126
2127 if (!governor)
2128 return -EINVAL;
2129
a7b422cd
KRW
2130 if (cpufreq_disabled())
2131 return -ENODEV;
2132
3fc54d37 2133 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 2134
b394058f 2135 governor->initialized = 0;
3bcb09a3 2136 err = -EBUSY;
42f91fa1 2137 if (!find_governor(governor->name)) {
3bcb09a3
JF
2138 err = 0;
2139 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 2140 }
1da177e4 2141
32ee8c3e 2142 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 2143 return err;
1da177e4
LT
2144}
2145EXPORT_SYMBOL_GPL(cpufreq_register_governor);
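/*
 * Editorial sketch (not part of the original file): the skeleton of a
 * governor as registered above. The single ->governor() callback is
 * multiplexed over the CPUFREQ_GOV_* events dispatched by
 * __cpufreq_governor(); this trivial policy just pins the minimum
 * frequency, mirroring the in-tree powersave governor.
 */
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		return __cpufreq_driver_target(policy, policy->min,
					       CPUFREQ_RELATION_L);
	default:
		return 0;
	}
}

static struct cpufreq_governor example_gov = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

/* cpufreq_register_governor(&example_gov); */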
2146
1da177e4
LT
2147void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2148{
4573237b
VK
2149 struct cpufreq_policy *policy;
2150 unsigned long flags;
90e41bac 2151
1da177e4
LT
2152 if (!governor)
2153 return;
2154
a7b422cd
KRW
2155 if (cpufreq_disabled())
2156 return;
2157
4573237b
VK
2158 /* clear last_governor for all inactive policies */
2159 read_lock_irqsave(&cpufreq_driver_lock, flags);
2160 for_each_inactive_policy(policy) {
18bf3a12
VK
2161 if (!strcmp(policy->last_governor, governor->name)) {
2162 policy->governor = NULL;
4573237b 2163 strcpy(policy->last_governor, "\0");
18bf3a12 2164 }
90e41bac 2165 }
4573237b 2166 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
90e41bac 2167
3fc54d37 2168 mutex_lock(&cpufreq_governor_mutex);
1da177e4 2169 list_del(&governor->governor_list);
3fc54d37 2170 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
2171 return;
2172}
2173EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2174
2175
1da177e4
LT
2176/*********************************************************************
2177 * POLICY INTERFACE *
2178 *********************************************************************/
2179
2180/**
2181 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
2182 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2183 * is written
1da177e4
LT
2184 *
2185 * Reads the current cpufreq policy.
2186 */
2187int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2188{
2189 struct cpufreq_policy *cpu_policy;
2190 if (!policy)
2191 return -EINVAL;
2192
2193 cpu_policy = cpufreq_cpu_get(cpu);
2194 if (!cpu_policy)
2195 return -EINVAL;
2196
d5b73cd8 2197 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
2198
2199 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
2200 return 0;
2201}
2202EXPORT_SYMBOL(cpufreq_get_policy);
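/*
 * Editorial sketch (not part of the original file): snapshotting CPU0's
 * policy. The helper copies into caller-provided storage, so no
 * reference is held afterwards; note the on-stack copy is sizeable.
 */
static void example_show_limits(void)
{
	struct cpufreq_policy snap;

	if (!cpufreq_get_policy(&snap, 0))
		pr_info("cpu0 limits: %u - %u kHz\n", snap.min, snap.max);
}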
2203
153d7f3f 2204/*
037ce839
VK
2205 * policy : current policy.
2206 * new_policy: policy to be set.
153d7f3f 2207 */
037ce839 2208static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 2209 struct cpufreq_policy *new_policy)
1da177e4 2210{
d9a789c7
RW
2211 struct cpufreq_governor *old_gov;
2212 int ret;
1da177e4 2213
e837f9b5
JP
2214 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2215 new_policy->cpu, new_policy->min, new_policy->max);
1da177e4 2216
d5b73cd8 2217 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1da177e4 2218
d9a789c7
RW
2219 if (new_policy->min > policy->max || new_policy->max < policy->min)
2220 return -EINVAL;
9c9a43ed 2221
1da177e4 2222 /* verify the cpu speed can be set within this limit */
3a3e9e06 2223 ret = cpufreq_driver->verify(new_policy);
1da177e4 2224 if (ret)
d9a789c7 2225 return ret;
1da177e4 2226
1da177e4 2227 /* adjust if necessary - all reasons */
e041c683 2228 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 2229 CPUFREQ_ADJUST, new_policy);
1da177e4
LT
2230
2231 /* adjust if necessary - hardware incompatibility*/
e041c683 2232 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 2233 CPUFREQ_INCOMPATIBLE, new_policy);
1da177e4 2234
bb176f7d
VK
2235 /*
2236 * verify the cpu speed can be set within this limit, which might be
2237 * different to the first one
2238 */
3a3e9e06 2239 ret = cpufreq_driver->verify(new_policy);
e041c683 2240 if (ret)
d9a789c7 2241 return ret;
1da177e4
LT
2242
2243 /* notification of the new policy */
e041c683 2244 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 2245 CPUFREQ_NOTIFY, new_policy);
1da177e4 2246
3a3e9e06
VK
2247 policy->min = new_policy->min;
2248 policy->max = new_policy->max;
1da177e4 2249
2d06d8c4 2250 pr_debug("new min and max freqs are %u - %u kHz\n",
e837f9b5 2251 policy->min, policy->max);
1da177e4 2252
1c3d85dd 2253 if (cpufreq_driver->setpolicy) {
3a3e9e06 2254 policy->policy = new_policy->policy;
2d06d8c4 2255 pr_debug("setting range\n");
d9a789c7
RW
2256 return cpufreq_driver->setpolicy(new_policy);
2257 }
1da177e4 2258
d9a789c7
RW
2259 if (new_policy->governor == policy->governor)
2260 goto out;
7bd353a9 2261
d9a789c7
RW
2262 pr_debug("governor switch\n");
2263
2264 /* save old, working values */
2265 old_gov = policy->governor;
2266 /* end old governor */
2267 if (old_gov) {
2268 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2269 up_write(&policy->rwsem);
e5c87b76 2270 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
d9a789c7 2271 down_write(&policy->rwsem);
1da177e4
LT
2272 }
2273
d9a789c7
RW
2274 /* start new governor */
2275 policy->governor = new_policy->governor;
2276 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2277 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2278 goto out;
2279
2280 up_write(&policy->rwsem);
2281 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2282 down_write(&policy->rwsem);
2283 }
2284
2285 /* new governor failed, so re-start old one */
2286 pr_debug("starting governor %s failed\n", policy->governor->name);
2287 if (old_gov) {
2288 policy->governor = old_gov;
2289 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2290 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2291 }
2292
2293 return -EINVAL;
2294
2295 out:
2296 pr_debug("governor: change or update limits\n");
2297 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1da177e4
LT
2298}
2299
1da177e4
LT
2300/**
2301 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2302 * @cpu: CPU which shall be re-evaluated
2303 *
25985edc 2304 * Useful for policy notifiers which have different requirements
1da177e4
LT
2305 * at different times.
2306 */
2307int cpufreq_update_policy(unsigned int cpu)
2308{
3a3e9e06
VK
2309 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2310 struct cpufreq_policy new_policy;
f1829e4a 2311 int ret;
1da177e4 2312
fefa8ff8
AP
2313 if (!policy)
2314 return -ENODEV;
1da177e4 2315
ad7722da 2316 down_write(&policy->rwsem);
1da177e4 2317
2d06d8c4 2318 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 2319 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
2320 new_policy.min = policy->user_policy.min;
2321 new_policy.max = policy->user_policy.max;
2322 new_policy.policy = policy->user_policy.policy;
2323 new_policy.governor = policy->user_policy.governor;
1da177e4 2324
bb176f7d
VK
2325 /*
 2326 * The BIOS might change the frequency behind our back, so ask the
 2327 * driver for the current frequency and notify governors about a change
2328 */
2ed99e39 2329 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
3a3e9e06 2330 new_policy.cur = cpufreq_driver->get(cpu);
bd0fa9bb
VK
2331 if (WARN_ON(!new_policy.cur)) {
2332 ret = -EIO;
fefa8ff8 2333 goto unlock;
bd0fa9bb
VK
2334 }
2335
3a3e9e06 2336 if (!policy->cur) {
e837f9b5 2337 pr_debug("Driver did not initialize current freq\n");
3a3e9e06 2338 policy->cur = new_policy.cur;
a85f7bd3 2339 } else {
9c0ebcf7 2340 if (policy->cur != new_policy.cur && has_target())
a1e1dc41 2341 cpufreq_out_of_sync(policy, new_policy.cur);
a85f7bd3 2342 }
0961dd0d
TR
2343 }
2344
037ce839 2345 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2346
fefa8ff8 2347unlock:
ad7722da 2348 up_write(&policy->rwsem);
5a01f2e8 2349
3a3e9e06 2350 cpufreq_cpu_put(policy);
1da177e4
LT
2351 return ret;
2352}
2353EXPORT_SYMBOL(cpufreq_update_policy);
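/*
 * Editorial sketch (not part of the original file): re-evaluating a
 * CPU's policy after firmware changes the allowed limits, e.g. from an
 * ACPI _PPC change handler. The caller context is an assumption.
 */
static void example_limits_changed(unsigned int cpu)
{
	if (cpufreq_update_policy(cpu))
		pr_warn("policy re-evaluation failed for cpu%u\n", cpu);
}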
2354
2760984f 2355static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2356 unsigned long action, void *hcpu)
2357{
2358 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2359 struct device *dev;
c32b6b8e 2360
8a25a2fd
KS
2361 dev = get_cpu_device(cpu);
2362 if (dev) {
5302c3fb 2363 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2364 case CPU_ONLINE:
23faf0b7 2365 cpufreq_add_dev(dev, NULL);
c32b6b8e 2366 break;
5302c3fb 2367
c32b6b8e 2368 case CPU_DOWN_PREPARE:
96bbbe4a 2369 __cpufreq_remove_dev_prepare(dev, NULL);
1aee40ac
SB
2370 break;
2371
2372 case CPU_POST_DEAD:
96bbbe4a 2373 __cpufreq_remove_dev_finish(dev, NULL);
c32b6b8e 2374 break;
5302c3fb 2375
5a01f2e8 2376 case CPU_DOWN_FAILED:
23faf0b7 2377 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
2378 break;
2379 }
2380 }
2381 return NOTIFY_OK;
2382}
2383
9c36f746 2384static struct notifier_block __refdata cpufreq_cpu_notifier = {
bb176f7d 2385 .notifier_call = cpufreq_cpu_callback,
c32b6b8e 2386};
1da177e4 2387
6f19efc0
LM
2388/*********************************************************************
2389 * BOOST *
2390 *********************************************************************/
2391static int cpufreq_boost_set_sw(int state)
2392{
2393 struct cpufreq_frequency_table *freq_table;
2394 struct cpufreq_policy *policy;
2395 int ret = -EINVAL;
2396
f963735a 2397 for_each_active_policy(policy) {
6f19efc0
LM
2398 freq_table = cpufreq_frequency_get_table(policy->cpu);
2399 if (freq_table) {
2400 ret = cpufreq_frequency_table_cpuinfo(policy,
2401 freq_table);
2402 if (ret) {
2403 pr_err("%s: Policy frequency update failed\n",
2404 __func__);
2405 break;
2406 }
2407 policy->user_policy.max = policy->max;
2408 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2409 }
2410 }
2411
2412 return ret;
2413}
2414
2415int cpufreq_boost_trigger_state(int state)
2416{
2417 unsigned long flags;
2418 int ret = 0;
2419
2420 if (cpufreq_driver->boost_enabled == state)
2421 return 0;
2422
2423 write_lock_irqsave(&cpufreq_driver_lock, flags);
2424 cpufreq_driver->boost_enabled = state;
2425 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2426
2427 ret = cpufreq_driver->set_boost(state);
2428 if (ret) {
2429 write_lock_irqsave(&cpufreq_driver_lock, flags);
2430 cpufreq_driver->boost_enabled = !state;
2431 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2432
e837f9b5
JP
2433 pr_err("%s: Cannot %s BOOST\n",
2434 __func__, state ? "enable" : "disable");
6f19efc0
LM
2435 }
2436
2437 return ret;
2438}
2439
2440int cpufreq_boost_supported(void)
2441{
2442 if (likely(cpufreq_driver))
2443 return cpufreq_driver->boost_supported;
2444
2445 return 0;
2446}
2447EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2448
2449int cpufreq_boost_enabled(void)
2450{
2451 return cpufreq_driver->boost_enabled;
2452}
2453EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
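/*
 * Editorial sketch (not part of the original file): boost state should
 * only be consulted once the driver advertises support for it.
 */
static void example_report_boost(void)
{
	if (!cpufreq_boost_supported()) {
		pr_info("boost not supported\n");
		return;
	}
	pr_info("boost currently %sabled\n",
		cpufreq_boost_enabled() ? "en" : "dis");
}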
2454
1da177e4
LT
2455/*********************************************************************
2456 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2457 *********************************************************************/
2458
2459/**
2460 * cpufreq_register_driver - register a CPU Frequency driver
 2461 * @driver_data: A struct cpufreq_driver containing the values
2462 * submitted by the CPU Frequency driver.
2463 *
bb176f7d 2464 * Registers a CPU Frequency driver to this core code. This code
1da177e4 2465 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 2466 * (and isn't unregistered in the meantime).
1da177e4
LT
2467 *
2468 */
221dee28 2469int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
2470{
2471 unsigned long flags;
2472 int ret;
2473
a7b422cd
KRW
2474 if (cpufreq_disabled())
2475 return -ENODEV;
2476
1da177e4 2477 if (!driver_data || !driver_data->verify || !driver_data->init ||
9c0ebcf7 2478 !(driver_data->setpolicy || driver_data->target_index ||
9832235f
RW
2479 driver_data->target) ||
2480 (driver_data->setpolicy && (driver_data->target_index ||
1c03a2d0
VK
2481 driver_data->target)) ||
2482 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
1da177e4
LT
2483 return -EINVAL;
2484
2d06d8c4 2485 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4 2486
0d1857a1 2487 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2488 if (cpufreq_driver) {
0d1857a1 2489 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4dea5806 2490 return -EEXIST;
1da177e4 2491 }
1c3d85dd 2492 cpufreq_driver = driver_data;
0d1857a1 2493 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 2494
bc68b7df
VK
2495 if (driver_data->setpolicy)
2496 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2497
6f19efc0
LM
2498 if (cpufreq_boost_supported()) {
2499 /*
2500 * Check if driver provides function to enable boost -
2501 * if not, use cpufreq_boost_set_sw as default
2502 */
2503 if (!cpufreq_driver->set_boost)
2504 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2505
2506 ret = cpufreq_sysfs_create_file(&boost.attr);
2507 if (ret) {
2508 pr_err("%s: cannot register global BOOST sysfs file\n",
e837f9b5 2509 __func__);
6f19efc0
LM
2510 goto err_null_driver;
2511 }
2512 }
2513
8a25a2fd 2514 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab 2515 if (ret)
6f19efc0 2516 goto err_boost_unreg;
1da177e4 2517
ce1bcfe9
VK
2518 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2519 list_empty(&cpufreq_policy_list)) {
1da177e4 2520 /* if all ->init() calls failed, unregister */
ce1bcfe9
VK
2521 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2522 driver_data->name);
2523 goto err_if_unreg;
1da177e4
LT
2524 }
2525
8f5bc2ab 2526 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 2527 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 2528
8f5bc2ab 2529 return 0;
8a25a2fd
KS
2530err_if_unreg:
2531 subsys_interface_unregister(&cpufreq_interface);
6f19efc0
LM
2532err_boost_unreg:
2533 if (cpufreq_boost_supported())
2534 cpufreq_sysfs_remove_file(&boost.attr);
8f5bc2ab 2535err_null_driver:
0d1857a1 2536 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2537 cpufreq_driver = NULL;
0d1857a1 2538 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 2539 return ret;
1da177e4
LT
2540}
2541EXPORT_SYMBOL_GPL(cpufreq_register_driver);
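/*
 * Editorial sketch (not part of the original file): the minimal
 * callback set that passes the validation above -- ->verify, ->init
 * and exactly one of ->setpolicy / ->target / ->target_index. The
 * table contents and all "example_" names are assumptions.
 */
static struct cpufreq_frequency_table example_table[] = {
	{ .frequency = 500000 },
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_init(struct cpufreq_policy *policy)
{
	return cpufreq_table_validate_and_show(policy, example_table);
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* program the PLL/regulator for example_table[index].frequency */
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.verify		= cpufreq_generic_frequency_table_verify,
	.init		= example_init,
	.target_index	= example_target_index,
};

/*
 * module_init: cpufreq_register_driver(&example_driver);
 * module_exit: cpufreq_unregister_driver(&example_driver);
 */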
2542
1da177e4
LT
2543/**
2544 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2545 *
bb176f7d 2546 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2547 * the right to do so, i.e. if you have succeeded in initialising before!
2548 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2549 * currently not initialised.
2550 */
221dee28 2551int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2552{
2553 unsigned long flags;
2554
1c3d85dd 2555 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2556 return -EINVAL;
1da177e4 2557
2d06d8c4 2558 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2559
8a25a2fd 2560 subsys_interface_unregister(&cpufreq_interface);
6f19efc0
LM
2561 if (cpufreq_boost_supported())
2562 cpufreq_sysfs_remove_file(&boost.attr);
2563
65edc68c 2564 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2565
6eed9404 2566 down_write(&cpufreq_rwsem);
0d1857a1 2567 write_lock_irqsave(&cpufreq_driver_lock, flags);
6eed9404 2568
1c3d85dd 2569 cpufreq_driver = NULL;
6eed9404 2570
0d1857a1 2571 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
6eed9404 2572 up_write(&cpufreq_rwsem);
1da177e4
LT
2573
2574 return 0;
2575}
2576EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8 2577
90de2a4a
DA
2578/*
2579 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2580 * or mutexes when secondary CPUs are halted.
2581 */
2582static struct syscore_ops cpufreq_syscore_ops = {
2583 .shutdown = cpufreq_suspend,
2584};
2585
5a01f2e8
VP
2586static int __init cpufreq_core_init(void)
2587{
a7b422cd
KRW
2588 if (cpufreq_disabled())
2589 return -ENODEV;
2590
2361be23 2591 cpufreq_global_kobject = kobject_create();
8aa84ad8
TR
2592 BUG_ON(!cpufreq_global_kobject);
2593
90de2a4a
DA
2594 register_syscore_ops(&cpufreq_syscore_ops);
2595
5a01f2e8
VP
2596 return 0;
2597}
5a01f2e8 2598core_initcall(cpufreq_core_init);