Merge remote-tracking branches 'regulator/topic/s2mps11', 'regulator/topic/s5m8767...
[linux-2.6-block.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
e00e56df 29#include <linux/syscore_ops.h>
5ff0a268 30#include <linux/tick.h>
6f4f2723
TR
31#include <trace/events/power.h>
32
1da177e4 33/**
cd878479 34 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
1c3d85dd 38static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 39static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d
VK
41static DEFINE_RWLOCK(cpufreq_driver_lock);
42static DEFINE_MUTEX(cpufreq_governor_lock);
c88a1f8b 43static LIST_HEAD(cpufreq_policy_list);
bb176f7d 44
084f3493
TR
45#ifdef CONFIG_HOTPLUG_CPU
46/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 47static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 48#endif
1da177e4 49
9c0ebcf7
VK
50static inline bool has_target(void)
51{
52 return cpufreq_driver->target_index || cpufreq_driver->target;
53}
54
6eed9404
VK
55/*
56 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
57 * sections
58 */
59static DECLARE_RWSEM(cpufreq_rwsem);
60
1da177e4 61/* internal prototypes */
29464f28
DJ
62static int __cpufreq_governor(struct cpufreq_policy *policy,
63 unsigned int event);
5a01f2e8 64static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 65static void handle_update(struct work_struct *work);
1da177e4
LT
66
67/**
32ee8c3e
DJ
68 * Two notifier lists: the "policy" list is involved in the
69 * validation process for a new CPU frequency policy; the
1da177e4
LT
70 * "transition" list for kernel code that needs to handle
71 * changes to devices when the CPU clock speed changes.
72 * The mutex locks both lists.
73 */
e041c683 74static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 75static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 76
74212ca4 77static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
78static int __init init_cpufreq_transition_notifier_list(void)
79{
80 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 81 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
82 return 0;
83}
b3438f82 84pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 85
a7b422cd 86static int off __read_mostly;
da584455 87static int cpufreq_disabled(void)
a7b422cd
KRW
88{
89 return off;
90}
91void disable_cpufreq(void)
92{
93 off = 1;
94}
1da177e4 95static LIST_HEAD(cpufreq_governor_list);
29464f28 96static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 97
4d5dcc42
VK
98bool have_governor_per_policy(void)
99{
0b981e70 100 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 101}
3f869d6d 102EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 103
944e9a03
VK
104struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
105{
106 if (have_governor_per_policy())
107 return &policy->kobj;
108 else
109 return cpufreq_global_kobject;
110}
111EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
112
72a4ce34
VK
113static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
114{
115 u64 idle_time;
116 u64 cur_wall_time;
117 u64 busy_time;
118
119 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
120
121 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
122 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
127
128 idle_time = cur_wall_time - busy_time;
129 if (wall)
130 *wall = cputime_to_usecs(cur_wall_time);
131
132 return cputime_to_usecs(idle_time);
133}
134
135u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
136{
137 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
138
139 if (idle_time == -1ULL)
140 return get_cpu_idle_time_jiffy(cpu, wall);
141 else if (!io_busy)
142 idle_time += get_cpu_iowait_time_us(cpu, wall);
143
144 return idle_time;
145}
146EXPORT_SYMBOL_GPL(get_cpu_idle_time);
147
70e9e778
VK
148/*
149 * This is a generic cpufreq init() routine which can be used by cpufreq
150 * drivers of SMP systems. It will do following:
151 * - validate & show freq table passed
152 * - set policies transition latency
153 * - policy->cpus with all possible CPUs
154 */
155int cpufreq_generic_init(struct cpufreq_policy *policy,
156 struct cpufreq_frequency_table *table,
157 unsigned int transition_latency)
158{
159 int ret;
160
161 ret = cpufreq_table_validate_and_show(policy, table);
162 if (ret) {
163 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
164 return ret;
165 }
166
167 policy->cpuinfo.transition_latency = transition_latency;
168
169 /*
170 * The driver only supports the SMP configuartion where all processors
171 * share the clock and voltage and clock.
172 */
173 cpumask_setall(policy->cpus);
174
175 return 0;
176}
177EXPORT_SYMBOL_GPL(cpufreq_generic_init);
178
6eed9404 179struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
1da177e4 180{
6eed9404 181 struct cpufreq_policy *policy = NULL;
1da177e4
LT
182 unsigned long flags;
183
6eed9404
VK
184 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
185 return NULL;
186
187 if (!down_read_trylock(&cpufreq_rwsem))
188 return NULL;
1da177e4
LT
189
190 /* get the cpufreq driver */
1c3d85dd 191 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 192
6eed9404
VK
193 if (cpufreq_driver) {
194 /* get the CPU */
195 policy = per_cpu(cpufreq_cpu_data, cpu);
196 if (policy)
197 kobject_get(&policy->kobj);
198 }
1da177e4 199
6eed9404 200 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 201
3a3e9e06 202 if (!policy)
6eed9404 203 up_read(&cpufreq_rwsem);
1da177e4 204
3a3e9e06 205 return policy;
a9144436 206}
1da177e4
LT
207EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
208
3a3e9e06 209void cpufreq_cpu_put(struct cpufreq_policy *policy)
1da177e4 210{
d5aaffa9
DB
211 if (cpufreq_disabled())
212 return;
213
6eed9404
VK
214 kobject_put(&policy->kobj);
215 up_read(&cpufreq_rwsem);
1da177e4
LT
216}
217EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
218
1da177e4
LT
219/*********************************************************************
220 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
221 *********************************************************************/
222
223/**
224 * adjust_jiffies - adjust the system "loops_per_jiffy"
225 *
226 * This function alters the system "loops_per_jiffy" for the clock
227 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 228 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
229 * per-CPU loops_per_jiffy value wherever possible.
230 */
231#ifndef CONFIG_SMP
232static unsigned long l_p_j_ref;
bb176f7d 233static unsigned int l_p_j_ref_freq;
1da177e4 234
858119e1 235static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
236{
237 if (ci->flags & CPUFREQ_CONST_LOOPS)
238 return;
239
240 if (!l_p_j_ref_freq) {
241 l_p_j_ref = loops_per_jiffy;
242 l_p_j_ref_freq = ci->old;
2d06d8c4 243 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 244 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 245 }
bb176f7d 246 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 247 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
248 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
249 ci->new);
2d06d8c4 250 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 251 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
252 }
253}
254#else
e08f5f5b
GS
255static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
256{
257 return;
258}
1da177e4
LT
259#endif
260
0956df9c 261static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb 262 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
263{
264 BUG_ON(irqs_disabled());
265
d5aaffa9
DB
266 if (cpufreq_disabled())
267 return;
268
1c3d85dd 269 freqs->flags = cpufreq_driver->flags;
2d06d8c4 270 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 271 state, freqs->new);
1da177e4 272
1da177e4 273 switch (state) {
e4472cb3 274
1da177e4 275 case CPUFREQ_PRECHANGE:
32ee8c3e 276 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
277 * which is not equal to what the cpufreq core thinks is
278 * "old frequency".
1da177e4 279 */
1c3d85dd 280 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
281 if ((policy) && (policy->cpu == freqs->cpu) &&
282 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 283 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
284 " %u, cpufreq assumed %u kHz.\n",
285 freqs->old, policy->cur);
286 freqs->old = policy->cur;
1da177e4
LT
287 }
288 }
b4dfdbb3 289 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 290 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
291 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
292 break;
e4472cb3 293
1da177e4
LT
294 case CPUFREQ_POSTCHANGE:
295 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 296 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 297 (unsigned long)freqs->cpu);
25e41933 298 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 299 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 300 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
301 if (likely(policy) && likely(policy->cpu == freqs->cpu))
302 policy->cur = freqs->new;
1da177e4
LT
303 break;
304 }
1da177e4 305}
bb176f7d 306
b43a7ffb
VK
307/**
308 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
309 * on frequency transition.
310 *
311 * This function calls the transition notifiers and the "adjust_jiffies"
312 * function. It is called twice on all CPU frequency changes that have
313 * external effects.
314 */
315void cpufreq_notify_transition(struct cpufreq_policy *policy,
316 struct cpufreq_freqs *freqs, unsigned int state)
317{
318 for_each_cpu(freqs->cpu, policy->cpus)
319 __cpufreq_notify_transition(policy, freqs, state);
320}
1da177e4
LT
321EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
322
323
1da177e4
LT
324/*********************************************************************
325 * SYSFS INTERFACE *
326 *********************************************************************/
327
3bcb09a3
JF
328static struct cpufreq_governor *__find_governor(const char *str_governor)
329{
330 struct cpufreq_governor *t;
331
332 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 333 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
334 return t;
335
336 return NULL;
337}
338
1da177e4
LT
339/**
340 * cpufreq_parse_governor - parse a governor string
341 */
905d77cd 342static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
343 struct cpufreq_governor **governor)
344{
3bcb09a3 345 int err = -EINVAL;
1c3d85dd
RW
346
347 if (!cpufreq_driver)
3bcb09a3
JF
348 goto out;
349
1c3d85dd 350 if (cpufreq_driver->setpolicy) {
1da177e4
LT
351 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
352 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 353 err = 0;
e08f5f5b
GS
354 } else if (!strnicmp(str_governor, "powersave",
355 CPUFREQ_NAME_LEN)) {
1da177e4 356 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 357 err = 0;
1da177e4 358 }
9c0ebcf7 359 } else if (has_target()) {
1da177e4 360 struct cpufreq_governor *t;
3bcb09a3 361
3fc54d37 362 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
363
364 t = __find_governor(str_governor);
365
ea714970 366 if (t == NULL) {
1a8e1463 367 int ret;
ea714970 368
1a8e1463
KC
369 mutex_unlock(&cpufreq_governor_mutex);
370 ret = request_module("cpufreq_%s", str_governor);
371 mutex_lock(&cpufreq_governor_mutex);
ea714970 372
1a8e1463
KC
373 if (ret == 0)
374 t = __find_governor(str_governor);
ea714970
JF
375 }
376
3bcb09a3
JF
377 if (t != NULL) {
378 *governor = t;
379 err = 0;
1da177e4 380 }
3bcb09a3 381
3fc54d37 382 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 383 }
29464f28 384out:
3bcb09a3 385 return err;
1da177e4 386}
1da177e4 387
1da177e4 388/**
e08f5f5b
GS
389 * cpufreq_per_cpu_attr_read() / show_##file_name() -
390 * print out cpufreq information
1da177e4
LT
391 *
392 * Write out information from cpufreq_driver->policy[cpu]; object must be
393 * "unsigned int".
394 */
395
32ee8c3e
DJ
396#define show_one(file_name, object) \
397static ssize_t show_##file_name \
905d77cd 398(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 399{ \
29464f28 400 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
401}
402
403show_one(cpuinfo_min_freq, cpuinfo.min_freq);
404show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 405show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
406show_one(scaling_min_freq, min);
407show_one(scaling_max_freq, max);
408show_one(scaling_cur_freq, cur);
409
037ce839 410static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 411 struct cpufreq_policy *new_policy);
7970e08b 412
1da177e4
LT
413/**
414 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
415 */
416#define store_one(file_name, object) \
417static ssize_t store_##file_name \
905d77cd 418(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 419{ \
5136fa56 420 int ret; \
1da177e4
LT
421 struct cpufreq_policy new_policy; \
422 \
423 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
424 if (ret) \
425 return -EINVAL; \
426 \
29464f28 427 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
428 if (ret != 1) \
429 return -EINVAL; \
430 \
037ce839 431 ret = cpufreq_set_policy(policy, &new_policy); \
7970e08b 432 policy->user_policy.object = policy->object; \
1da177e4
LT
433 \
434 return ret ? ret : count; \
435}
436
29464f28
DJ
437store_one(scaling_min_freq, min);
438store_one(scaling_max_freq, max);
1da177e4
LT
439
440/**
441 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
442 */
905d77cd
DJ
443static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
444 char *buf)
1da177e4 445{
5a01f2e8 446 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
447 if (!cur_freq)
448 return sprintf(buf, "<unknown>");
449 return sprintf(buf, "%u\n", cur_freq);
450}
451
1da177e4
LT
452/**
453 * show_scaling_governor - show the current policy for the specified CPU
454 */
905d77cd 455static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 456{
29464f28 457 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
458 return sprintf(buf, "powersave\n");
459 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
460 return sprintf(buf, "performance\n");
461 else if (policy->governor)
4b972f0b 462 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 463 policy->governor->name);
1da177e4
LT
464 return -EINVAL;
465}
466
1da177e4
LT
467/**
468 * store_scaling_governor - store policy for the specified CPU
469 */
905d77cd
DJ
470static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
471 const char *buf, size_t count)
1da177e4 472{
5136fa56 473 int ret;
1da177e4
LT
474 char str_governor[16];
475 struct cpufreq_policy new_policy;
476
477 ret = cpufreq_get_policy(&new_policy, policy->cpu);
478 if (ret)
479 return ret;
480
29464f28 481 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
482 if (ret != 1)
483 return -EINVAL;
484
e08f5f5b
GS
485 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
486 &new_policy.governor))
1da177e4
LT
487 return -EINVAL;
488
037ce839 489 ret = cpufreq_set_policy(policy, &new_policy);
7970e08b
TR
490
491 policy->user_policy.policy = policy->policy;
492 policy->user_policy.governor = policy->governor;
7970e08b 493
e08f5f5b
GS
494 if (ret)
495 return ret;
496 else
497 return count;
1da177e4
LT
498}
499
500/**
501 * show_scaling_driver - show the cpufreq driver currently loaded
502 */
905d77cd 503static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 504{
1c3d85dd 505 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
506}
507
508/**
509 * show_scaling_available_governors - show the available CPUfreq governors
510 */
905d77cd
DJ
511static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
512 char *buf)
1da177e4
LT
513{
514 ssize_t i = 0;
515 struct cpufreq_governor *t;
516
9c0ebcf7 517 if (!has_target()) {
1da177e4
LT
518 i += sprintf(buf, "performance powersave");
519 goto out;
520 }
521
522 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
523 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
524 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 525 goto out;
4b972f0b 526 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 527 }
7d5e350f 528out:
1da177e4
LT
529 i += sprintf(&buf[i], "\n");
530 return i;
531}
e8628dd0 532
f4fd3797 533ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
534{
535 ssize_t i = 0;
536 unsigned int cpu;
537
835481d9 538 for_each_cpu(cpu, mask) {
1da177e4
LT
539 if (i)
540 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
541 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
542 if (i >= (PAGE_SIZE - 5))
29464f28 543 break;
1da177e4
LT
544 }
545 i += sprintf(&buf[i], "\n");
546 return i;
547}
f4fd3797 548EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 549
e8628dd0
DW
550/**
551 * show_related_cpus - show the CPUs affected by each transition even if
552 * hw coordination is in use
553 */
554static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
555{
f4fd3797 556 return cpufreq_show_cpus(policy->related_cpus, buf);
e8628dd0
DW
557}
558
559/**
560 * show_affected_cpus - show the CPUs affected by each transition
561 */
562static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
563{
f4fd3797 564 return cpufreq_show_cpus(policy->cpus, buf);
e8628dd0
DW
565}
566
9e76988e 567static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 568 const char *buf, size_t count)
9e76988e
VP
569{
570 unsigned int freq = 0;
571 unsigned int ret;
572
879000f9 573 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
574 return -EINVAL;
575
576 ret = sscanf(buf, "%u", &freq);
577 if (ret != 1)
578 return -EINVAL;
579
580 policy->governor->store_setspeed(policy, freq);
581
582 return count;
583}
584
585static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
586{
879000f9 587 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
588 return sprintf(buf, "<unsupported>\n");
589
590 return policy->governor->show_setspeed(policy, buf);
591}
1da177e4 592
e2f74f35 593/**
8bf1ac72 594 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
595 */
596static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
597{
598 unsigned int limit;
599 int ret;
1c3d85dd
RW
600 if (cpufreq_driver->bios_limit) {
601 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
602 if (!ret)
603 return sprintf(buf, "%u\n", limit);
604 }
605 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
606}
607
6dad2a29
BP
608cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
609cpufreq_freq_attr_ro(cpuinfo_min_freq);
610cpufreq_freq_attr_ro(cpuinfo_max_freq);
611cpufreq_freq_attr_ro(cpuinfo_transition_latency);
612cpufreq_freq_attr_ro(scaling_available_governors);
613cpufreq_freq_attr_ro(scaling_driver);
614cpufreq_freq_attr_ro(scaling_cur_freq);
615cpufreq_freq_attr_ro(bios_limit);
616cpufreq_freq_attr_ro(related_cpus);
617cpufreq_freq_attr_ro(affected_cpus);
618cpufreq_freq_attr_rw(scaling_min_freq);
619cpufreq_freq_attr_rw(scaling_max_freq);
620cpufreq_freq_attr_rw(scaling_governor);
621cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 622
905d77cd 623static struct attribute *default_attrs[] = {
1da177e4
LT
624 &cpuinfo_min_freq.attr,
625 &cpuinfo_max_freq.attr,
ed129784 626 &cpuinfo_transition_latency.attr,
1da177e4
LT
627 &scaling_min_freq.attr,
628 &scaling_max_freq.attr,
629 &affected_cpus.attr,
e8628dd0 630 &related_cpus.attr,
1da177e4
LT
631 &scaling_governor.attr,
632 &scaling_driver.attr,
633 &scaling_available_governors.attr,
9e76988e 634 &scaling_setspeed.attr,
1da177e4
LT
635 NULL
636};
637
29464f28
DJ
638#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
639#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 640
29464f28 641static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 642{
905d77cd
DJ
643 struct cpufreq_policy *policy = to_policy(kobj);
644 struct freq_attr *fattr = to_attr(attr);
1b750e3b 645 ssize_t ret;
6eed9404
VK
646
647 if (!down_read_trylock(&cpufreq_rwsem))
1b750e3b 648 return -EINVAL;
5a01f2e8 649
ad7722da 650 down_read(&policy->rwsem);
5a01f2e8 651
e08f5f5b
GS
652 if (fattr->show)
653 ret = fattr->show(policy, buf);
654 else
655 ret = -EIO;
656
ad7722da 657 up_read(&policy->rwsem);
6eed9404 658 up_read(&cpufreq_rwsem);
1b750e3b 659
1da177e4
LT
660 return ret;
661}
662
905d77cd
DJ
663static ssize_t store(struct kobject *kobj, struct attribute *attr,
664 const char *buf, size_t count)
1da177e4 665{
905d77cd
DJ
666 struct cpufreq_policy *policy = to_policy(kobj);
667 struct freq_attr *fattr = to_attr(attr);
a07530b4 668 ssize_t ret = -EINVAL;
6eed9404 669
4f750c93
SB
670 get_online_cpus();
671
672 if (!cpu_online(policy->cpu))
673 goto unlock;
674
6eed9404 675 if (!down_read_trylock(&cpufreq_rwsem))
4f750c93 676 goto unlock;
5a01f2e8 677
ad7722da 678 down_write(&policy->rwsem);
5a01f2e8 679
e08f5f5b
GS
680 if (fattr->store)
681 ret = fattr->store(policy, buf, count);
682 else
683 ret = -EIO;
684
ad7722da 685 up_write(&policy->rwsem);
6eed9404 686
6eed9404 687 up_read(&cpufreq_rwsem);
4f750c93
SB
688unlock:
689 put_online_cpus();
690
1da177e4
LT
691 return ret;
692}
693
905d77cd 694static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 695{
905d77cd 696 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 697 pr_debug("last reference is dropped\n");
1da177e4
LT
698 complete(&policy->kobj_unregister);
699}
700
52cf25d0 701static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
702 .show = show,
703 .store = store,
704};
705
706static struct kobj_type ktype_cpufreq = {
707 .sysfs_ops = &sysfs_ops,
708 .default_attrs = default_attrs,
709 .release = cpufreq_sysfs_release,
710};
711
2361be23
VK
712struct kobject *cpufreq_global_kobject;
713EXPORT_SYMBOL(cpufreq_global_kobject);
714
715static int cpufreq_global_kobject_usage;
716
717int cpufreq_get_global_kobject(void)
718{
719 if (!cpufreq_global_kobject_usage++)
720 return kobject_add(cpufreq_global_kobject,
721 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
722
723 return 0;
724}
725EXPORT_SYMBOL(cpufreq_get_global_kobject);
726
727void cpufreq_put_global_kobject(void)
728{
729 if (!--cpufreq_global_kobject_usage)
730 kobject_del(cpufreq_global_kobject);
731}
732EXPORT_SYMBOL(cpufreq_put_global_kobject);
733
734int cpufreq_sysfs_create_file(const struct attribute *attr)
735{
736 int ret = cpufreq_get_global_kobject();
737
738 if (!ret) {
739 ret = sysfs_create_file(cpufreq_global_kobject, attr);
740 if (ret)
741 cpufreq_put_global_kobject();
742 }
743
744 return ret;
745}
746EXPORT_SYMBOL(cpufreq_sysfs_create_file);
747
748void cpufreq_sysfs_remove_file(const struct attribute *attr)
749{
750 sysfs_remove_file(cpufreq_global_kobject, attr);
751 cpufreq_put_global_kobject();
752}
753EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
754
19d6f7ec 755/* symlink affected CPUs */
308b60e7 756static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
19d6f7ec
DJ
757{
758 unsigned int j;
759 int ret = 0;
760
761 for_each_cpu(j, policy->cpus) {
8a25a2fd 762 struct device *cpu_dev;
19d6f7ec 763
308b60e7 764 if (j == policy->cpu)
19d6f7ec 765 continue;
19d6f7ec 766
e8fdde10 767 pr_debug("Adding link for CPU: %u\n", j);
8a25a2fd
KS
768 cpu_dev = get_cpu_device(j);
769 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec 770 "cpufreq");
71c3461e
RW
771 if (ret)
772 break;
19d6f7ec
DJ
773 }
774 return ret;
775}
776
308b60e7 777static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
8a25a2fd 778 struct device *dev)
909a694e
DJ
779{
780 struct freq_attr **drv_attr;
909a694e 781 int ret = 0;
909a694e
DJ
782
783 /* prepare interface data */
784 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 785 &dev->kobj, "cpufreq");
909a694e
DJ
786 if (ret)
787 return ret;
788
789 /* set up files for this cpu device */
1c3d85dd 790 drv_attr = cpufreq_driver->attr;
909a694e
DJ
791 while ((drv_attr) && (*drv_attr)) {
792 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
793 if (ret)
1c3d85dd 794 goto err_out_kobj_put;
909a694e
DJ
795 drv_attr++;
796 }
1c3d85dd 797 if (cpufreq_driver->get) {
909a694e
DJ
798 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
799 if (ret)
1c3d85dd 800 goto err_out_kobj_put;
909a694e 801 }
9c0ebcf7 802 if (has_target()) {
909a694e
DJ
803 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
804 if (ret)
1c3d85dd 805 goto err_out_kobj_put;
909a694e 806 }
1c3d85dd 807 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
808 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
809 if (ret)
1c3d85dd 810 goto err_out_kobj_put;
e2f74f35 811 }
909a694e 812
308b60e7 813 ret = cpufreq_add_dev_symlink(policy);
ecf7e461
DJ
814 if (ret)
815 goto err_out_kobj_put;
816
e18f1682
SB
817 return ret;
818
819err_out_kobj_put:
820 kobject_put(&policy->kobj);
821 wait_for_completion(&policy->kobj_unregister);
822 return ret;
823}
824
825static void cpufreq_init_policy(struct cpufreq_policy *policy)
826{
827 struct cpufreq_policy new_policy;
828 int ret = 0;
829
d5b73cd8 830 memcpy(&new_policy, policy, sizeof(*policy));
a27a9ab7
JB
831
832 /* Use the default policy if its valid. */
833 if (cpufreq_driver->setpolicy)
834 cpufreq_parse_governor(policy->governor->name,
835 &new_policy.policy, NULL);
836
037ce839 837 /* assure that the starting sequence is run in cpufreq_set_policy */
ecf7e461
DJ
838 policy->governor = NULL;
839
840 /* set default policy */
037ce839 841 ret = cpufreq_set_policy(policy, &new_policy);
ecf7e461 842 if (ret) {
2d06d8c4 843 pr_debug("setting policy failed\n");
1c3d85dd
RW
844 if (cpufreq_driver->exit)
845 cpufreq_driver->exit(policy);
ecf7e461 846 }
909a694e
DJ
847}
848
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach hot-added @cpu to an existing @policy: stop the governor, add the
 * CPU to the policy's mask and per-cpu pointer under the driver lock,
 * restart the governor and create the sysfs link for @dev.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	unsigned long flags;
	int ret = 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
1da177e4 885
8414809c
SB
886static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
887{
888 struct cpufreq_policy *policy;
889 unsigned long flags;
890
44871c9c 891 read_lock_irqsave(&cpufreq_driver_lock, flags);
8414809c
SB
892
893 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
894
44871c9c 895 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
8414809c
SB
896
897 return policy;
898}
899
e9698cc5
SB
900static struct cpufreq_policy *cpufreq_policy_alloc(void)
901{
902 struct cpufreq_policy *policy;
903
904 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
905 if (!policy)
906 return NULL;
907
908 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
909 goto err_free_policy;
910
911 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
912 goto err_free_cpumask;
913
c88a1f8b 914 INIT_LIST_HEAD(&policy->policy_list);
ad7722da 915 init_rwsem(&policy->rwsem);
916
e9698cc5
SB
917 return policy;
918
919err_free_cpumask:
920 free_cpumask_var(policy->cpus);
921err_free_policy:
922 kfree(policy);
923
924 return NULL;
925}
926
42f921a6
VK
927static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
928{
929 struct kobject *kobj;
930 struct completion *cmp;
931
932 down_read(&policy->rwsem);
933 kobj = &policy->kobj;
934 cmp = &policy->kobj_unregister;
935 up_read(&policy->rwsem);
936 kobject_put(kobj);
937
938 /*
939 * We need to make sure that the underlying kobj is
940 * actually not referenced anymore by anybody before we
941 * proceed with unloading.
942 */
943 pr_debug("waiting for dropping of refcount\n");
944 wait_for_completion(cmp);
945 pr_debug("wait complete\n");
946}
947
e9698cc5
SB
948static void cpufreq_policy_free(struct cpufreq_policy *policy)
949{
950 free_cpumask_var(policy->related_cpus);
951 free_cpumask_var(policy->cpus);
952 kfree(policy);
953}
954
0d66b91e
SB
955static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
956{
99ec899e 957 if (WARN_ON(cpu == policy->cpu))
cb38ed5c
SB
958 return;
959
ad7722da 960 down_write(&policy->rwsem);
8efd5765 961
0d66b91e
SB
962 policy->last_cpu = policy->cpu;
963 policy->cpu = cpu;
964
ad7722da 965 up_write(&policy->rwsem);
8efd5765 966
0d66b91e 967 cpufreq_frequency_table_update_policy_cpu(policy);
0d66b91e
SB
968 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
969 CPUFREQ_UPDATE_POLICY_CPU, policy);
970}
971
a82fab29
SB
/*
 * __cpufreq_add_dev - bring up the cpufreq interface for a CPU device
 * @dev: the CPU device being added
 * @sif: subsystem interface (unused here, required by the callback type)
 * @frozen: true for a light-weight re-init on resume (reuse the policy
 *	saved by the light-weight tear-down); false for a full init
 *
 * Returns 0 on success or if there is nothing to do (CPU offline, policy
 * already set up, driver going away), negative errno on failure.
 */
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
	struct cpufreq_governor *gov;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	/* Fails only while the driver is being unregistered. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			/* Sibling policy exists: just link this CPU to it. */
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		frozen = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (frozen && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);
	else
		policy->cpu = cpu;

	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	if (cpufreq_driver->get) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			/* A driver reporting 0 kHz is treated as broken. */
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!frozen) {
		/* Seed the user limits from the driver-provided limits. */
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* Restore the governor this CPU used before it was unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!frozen) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

err_get_freq:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (frozen) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
1139
a82fab29
SB
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried
 * to clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	/* Full (not light-weight resume-path) initialization. */
	return __cpufreq_add_dev(dev, sif, false);
}
1153
/*
 * cpufreq_nominate_new_policy_cpu - hand a policy's sysfs dir to a sibling
 * @policy: policy whose current owner CPU is going away
 * @old_cpu: the outgoing policy->cpu
 *
 * Moves the policy kobject under some other CPU in policy->cpus.
 * Returns the new owner's CPU id on success; -EINVAL if the kobject could
 * not be moved, in which case @old_cpu is put back into policy->cpus.
 */
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu)
{
	struct device *cpu_dev;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d", __func__, ret);

		down_write(&policy->rwsem);
		cpumask_set_cpu(old_cpu, policy->cpus);
		up_write(&policy->rwsem);

		/* Best-effort restore of the symlink; result intentionally
		 * unused -- we report the kobject_move() failure instead. */
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}
1180
cedb70af
SB
/*
 * __cpufreq_remove_dev_prepare - first half of CPU removal
 * @dev: the CPU device being removed
 * @sif: subsystem interface (unused here)
 * @frozen: true for a light-weight tear-down (suspend path): the policy is
 *	stashed in cpufreq_cpu_data_fallback for later restore
 *
 * Stops the governor, remembers the governor name for re-plug, and if the
 * departing CPU owned the policy's sysfs directory, nominates a sibling as
 * the new owner. The heavy cleanup happens in __cpufreq_remove_dev_finish().
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored on re-plug. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
#endif

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		/* Non-owner CPUs only have a symlink to drop. */
		if (!frozen)
			sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Owner is leaving but siblings remain: re-home the kobject. */
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
						__func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}
1242
/*
 * __cpufreq_remove_dev_finish - second half of CPU removal
 * @dev: the CPU device being removed
 * @sif: subsystem interface (unused here)
 * @frozen: true for a light-weight tear-down (suspend path): the policy
 *	object and its kobject are kept for later restore
 *
 * Drops the CPU from the policy. If it was the last CPU, exits the
 * governor, calls the driver's ->exit() and (for a full tear-down) frees
 * the policy; otherwise restarts the governor for the remaining CPUs.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
						__func__);
				return ret;
			}
		}

		if (!frozen)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		/* Siblings remain: put the governor back in charge. */
		if (has_target()) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
						__func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
1312
cedb70af 1313/**
27a862e9 1314 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1315 *
1316 * Removes the cpufreq interface for a CPU device.
cedb70af 1317 */
8a25a2fd 1318static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1319{
8a25a2fd 1320 unsigned int cpu = dev->id;
27a862e9 1321 int ret;
ec28297a
VP
1322
1323 if (cpu_is_offline(cpu))
1324 return 0;
1325
27a862e9
VK
1326 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1327
1328 if (!ret)
1329 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1330
1331 return ret;
5a01f2e8
VP
1332}
1333
65f27f38 1334static void handle_update(struct work_struct *work)
1da177e4 1335{
65f27f38
DH
1336 struct cpufreq_policy *policy =
1337 container_of(work, struct cpufreq_policy, update);
1338 unsigned int cpu = policy->cpu;
2d06d8c4 1339 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1340 cpufreq_update_policy(cpu);
1341}
1342
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Announce the correction as an ordinary frequency transition. */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
1373
32ee8c3e 1374/**
4ab70df4 1375 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1376 * @cpu: CPU number
1377 *
1378 * This is the last known freq, without actually getting it from the driver.
1379 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1380 */
1381unsigned int cpufreq_quick_get(unsigned int cpu)
1382{
9e21ba8b 1383 struct cpufreq_policy *policy;
e08f5f5b 1384 unsigned int ret_freq = 0;
95235ca2 1385
1c3d85dd
RW
1386 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1387 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1388
1389 policy = cpufreq_cpu_get(cpu);
95235ca2 1390 if (policy) {
e08f5f5b 1391 ret_freq = policy->cur;
95235ca2
VP
1392 cpufreq_cpu_put(policy);
1393 }
1394
4d34a67d 1395 return ret_freq;
95235ca2
VP
1396}
1397EXPORT_SYMBOL(cpufreq_quick_get);
1398
3d737108
JB
1399/**
1400 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1401 * @cpu: CPU number
1402 *
1403 * Just return the max possible frequency for a given CPU.
1404 */
1405unsigned int cpufreq_quick_get_max(unsigned int cpu)
1406{
1407 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1408 unsigned int ret_freq = 0;
1409
1410 if (policy) {
1411 ret_freq = policy->max;
1412 cpufreq_cpu_put(policy);
1413 }
1414
1415 return ret_freq;
1416}
1417EXPORT_SYMBOL(cpufreq_quick_get_max);
1418
/*
 * __cpufreq_get - ask the driver for @cpu's current frequency
 *
 * Caller is expected to hold the policy rwsem. If the driver-reported
 * frequency disagrees with the cached policy->cur (and the driver does not
 * set CPUFREQ_CONST_LOOPS), the cached value is resynchronized via
 * cpufreq_out_of_sync() and a policy update is scheduled.
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1441
5a01f2e8
VP
1442/**
1443 * cpufreq_get - get the current CPU frequency (in kHz)
1444 * @cpu: CPU number
1445 *
1446 * Get the CPU current (static) CPU frequency
1447 */
1448unsigned int cpufreq_get(unsigned int cpu)
1449{
ad7722da 1450 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
5a01f2e8 1451 unsigned int ret_freq = 0;
5a01f2e8 1452
26ca8694
VK
1453 if (cpufreq_disabled() || !cpufreq_driver)
1454 return -ENOENT;
1455
ad7722da 1456 BUG_ON(!policy);
1457
6eed9404
VK
1458 if (!down_read_trylock(&cpufreq_rwsem))
1459 return 0;
5a01f2e8 1460
ad7722da 1461 down_read(&policy->rwsem);
5a01f2e8
VP
1462
1463 ret_freq = __cpufreq_get(cpu);
1464
ad7722da 1465 up_read(&policy->rwsem);
6eed9404
VK
1466 up_read(&cpufreq_rwsem);
1467
4d34a67d 1468 return ret_freq;
1da177e4
LT
1469}
1470EXPORT_SYMBOL(cpufreq_get);
1471
8a25a2fd
KS
/* Glue between the CPU subsystem and cpufreq: per-CPU add/remove hooks. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1478
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", policy->cpu);
	}

	cpufreq_cpu_put(policy);
	return ret;
}
1509
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	restored. It will verify that the current freq is in sync with
 *	what we believe it to be. This is a bit later than when it
 *	should be, but nonethteless it's better than calling
 *	cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", policy->cpu);
			goto fail;
		}
	}

	/* Defer the freq re-sync; see the function comment above. */
	schedule_work(&policy->update);

fail:
	cpufreq_cpu_put(policy);
}
1551
e00e56df
RW
/* Syscore hooks: run on the boot CPU only, late in suspend/early in resume. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1556
9d95046e
BP
1557/**
1558 * cpufreq_get_current_driver - return current driver's name
1559 *
1560 * Return the name string of the currently loaded cpufreq driver
1561 * or NULL, if none.
1562 */
1563const char *cpufreq_get_current_driver(void)
1564{
1c3d85dd
RW
1565 if (cpufreq_driver)
1566 return cpufreq_driver->name;
1567
1568 return NULL;
9d95046e
BP
1569}
1570EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1571
1572/*********************************************************************
1573 * NOTIFIER LISTS INTERFACE *
1574 *********************************************************************/
1575
1576/**
1577 * cpufreq_register_notifier - register a driver with cpufreq
1578 * @nb: notifier function to register
1579 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1580 *
32ee8c3e 1581 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1582 * are notified about clock rate changes (once before and once after
1583 * the transition), or a list of drivers that are notified about
1584 * changes in cpufreq policy.
1585 *
1586 * This function may sleep, and has the same return conditions as
e041c683 1587 * blocking_notifier_chain_register.
1da177e4
LT
1588 */
1589int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1590{
1591 int ret;
1592
d5aaffa9
DB
1593 if (cpufreq_disabled())
1594 return -EINVAL;
1595
74212ca4
CEB
1596 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1597
1da177e4
LT
1598 switch (list) {
1599 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1600 ret = srcu_notifier_chain_register(
e041c683 1601 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1602 break;
1603 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1604 ret = blocking_notifier_chain_register(
1605 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1606 break;
1607 default:
1608 ret = -EINVAL;
1609 }
1da177e4
LT
1610
1611 return ret;
1612}
1613EXPORT_SYMBOL(cpufreq_register_notifier);
1614
1da177e4
LT
1615/**
1616 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1617 * @nb: notifier block to be unregistered
bb176f7d 1618 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1619 *
1620 * Remove a driver from the CPU frequency notifier list.
1621 *
1622 * This function may sleep, and has the same return conditions as
e041c683 1623 * blocking_notifier_chain_unregister.
1da177e4
LT
1624 */
1625int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1626{
1627 int ret;
1628
d5aaffa9
DB
1629 if (cpufreq_disabled())
1630 return -EINVAL;
1631
1da177e4
LT
1632 switch (list) {
1633 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1634 ret = srcu_notifier_chain_unregister(
e041c683 1635 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1636 break;
1637 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1638 ret = blocking_notifier_chain_unregister(
1639 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1640 break;
1641 default:
1642 ret = -EINVAL;
1643 }
1da177e4
LT
1644
1645 return ret;
1646}
1647EXPORT_SYMBOL(cpufreq_unregister_notifier);
1648
1649
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/*
 * __cpufreq_driver_target - request a frequency change for @policy
 * @policy: policy to act on (caller must hold policy->rwsem)
 * @target_freq: requested frequency, clamped to [policy->min, policy->max]
 * @relation: CPUFREQ_RELATION_* rounding hint
 *
 * Dispatches to the driver's ->target() callback if present, otherwise
 * resolves @target_freq to a frequency-table index and uses
 * ->target_index(), issuing PRE/POSTCHANGE notifications itself unless the
 * driver has CPUFREQ_ASYNC_NOTIFICATION set.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		struct cpufreq_freqs freqs;
		bool notify;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		/* Rounded target equals current frequency: nothing to do. */
		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		/* Async drivers send their own transition notifications. */
		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);

		if (notify) {
			freqs.old = policy->cur;
			freqs.new = freq_table[index].frequency;
			freqs.flags = 0;

			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
					__func__, policy->cpu, freqs.old,
					freqs.new);

			cpufreq_notify_transition(policy, &freqs,
					CPUFREQ_PRECHANGE);
		}

		retval = cpufreq_driver->target_index(policy, index);
		if (retval)
			pr_err("%s: Failed to change cpu frequency: %d\n",
					__func__, retval);

		if (notify) {
			/*
			 * Notify with old freq in case we failed to change
			 * frequency
			 */
			if (retval)
				freqs.new = freqs.old;

			cpufreq_notify_transition(policy, &freqs,
					CPUFREQ_POSTCHANGE);
		}
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1745
1da177e4
LT
1746int cpufreq_driver_target(struct cpufreq_policy *policy,
1747 unsigned int target_freq,
1748 unsigned int relation)
1749{
f1829e4a 1750 int ret = -EINVAL;
1da177e4 1751
ad7722da 1752 down_write(&policy->rwsem);
1da177e4
LT
1753
1754 ret = __cpufreq_driver_target(policy, target_freq, relation);
1755
ad7722da 1756 up_write(&policy->rwsem);
1da177e4 1757
1da177e4
LT
1758 return ret;
1759}
1760EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1761
/*
 * __cpufreq_governor - forward a governor event to @policy's governor
 * @policy: policy to operate on
 * @event: one of CPUFREQ_GOV_{POLICY_INIT,START,STOP,LIMITS,POLICY_EXIT};
 *	when "event" is CPUFREQ_GOV_LIMITS the governor re-applies the
 *	policy's min/max limits
 *
 * Enforces the governor's max_transition_latency constraint (falling back
 * to the performance governor where available), tracks governor_enabled
 * state under cpufreq_governor_lock to reject START/STOP/LIMITS in the
 * wrong state, and balances the governor module refcount across
 * POLICY_INIT/POLICY_EXIT.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module for the lifetime of this policy binding. */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	/* Reject double-START, and STOP/LIMITS on a stopped governor. */
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop the module ref on failed INIT or successful EXIT. */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
1841
1da177e4
LT
1842int cpufreq_register_governor(struct cpufreq_governor *governor)
1843{
3bcb09a3 1844 int err;
1da177e4
LT
1845
1846 if (!governor)
1847 return -EINVAL;
1848
a7b422cd
KRW
1849 if (cpufreq_disabled())
1850 return -ENODEV;
1851
3fc54d37 1852 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1853
b394058f 1854 governor->initialized = 0;
3bcb09a3
JF
1855 err = -EBUSY;
1856 if (__find_governor(governor->name) == NULL) {
1857 err = 0;
1858 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1859 }
1da177e4 1860
32ee8c3e 1861 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1862 return err;
1da177e4
LT
1863}
1864EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1865
1da177e4
LT
1866void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1867{
90e41bac
PB
1868#ifdef CONFIG_HOTPLUG_CPU
1869 int cpu;
1870#endif
1871
1da177e4
LT
1872 if (!governor)
1873 return;
1874
a7b422cd
KRW
1875 if (cpufreq_disabled())
1876 return;
1877
90e41bac
PB
1878#ifdef CONFIG_HOTPLUG_CPU
1879 for_each_present_cpu(cpu) {
1880 if (cpu_online(cpu))
1881 continue;
1882 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1883 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1884 }
1885#endif
1886
3fc54d37 1887 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1888 list_del(&governor->governor_list);
3fc54d37 1889 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1890 return;
1891}
1892EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1893
1894
1da177e4
LT
1895/*********************************************************************
1896 * POLICY INTERFACE *
1897 *********************************************************************/
1898
1899/**
1900 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1901 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1902 * is written
1da177e4
LT
1903 *
1904 * Reads the current cpufreq policy.
1905 */
1906int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1907{
1908 struct cpufreq_policy *cpu_policy;
1909 if (!policy)
1910 return -EINVAL;
1911
1912 cpu_policy = cpufreq_cpu_get(cpu);
1913 if (!cpu_policy)
1914 return -EINVAL;
1915
d5b73cd8 1916 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
1917
1918 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1919 return 0;
1920}
1921EXPORT_SYMBOL(cpufreq_get_policy);
1922
153d7f3f 1923/*
037ce839
VK
1924 * policy : current policy.
1925 * new_policy: policy to be set.
153d7f3f 1926 */
037ce839 1927static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 1928 struct cpufreq_policy *new_policy)
1da177e4 1929{
7bd353a9 1930 int ret = 0, failed = 1;
1da177e4 1931
3a3e9e06
VK
1932 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1933 new_policy->min, new_policy->max);
1da177e4 1934
d5b73cd8 1935 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1da177e4 1936
3a3e9e06 1937 if (new_policy->min > policy->max || new_policy->max < policy->min) {
9c9a43ed
MD
1938 ret = -EINVAL;
1939 goto error_out;
1940 }
1941
1da177e4 1942 /* verify the cpu speed can be set within this limit */
3a3e9e06 1943 ret = cpufreq_driver->verify(new_policy);
1da177e4
LT
1944 if (ret)
1945 goto error_out;
1946
1da177e4 1947 /* adjust if necessary - all reasons */
e041c683 1948 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1949 CPUFREQ_ADJUST, new_policy);
1da177e4
LT
1950
1951 /* adjust if necessary - hardware incompatibility*/
e041c683 1952 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1953 CPUFREQ_INCOMPATIBLE, new_policy);
1da177e4 1954
bb176f7d
VK
1955 /*
1956 * verify the cpu speed can be set within this limit, which might be
1957 * different to the first one
1958 */
3a3e9e06 1959 ret = cpufreq_driver->verify(new_policy);
e041c683 1960 if (ret)
1da177e4 1961 goto error_out;
1da177e4
LT
1962
1963 /* notification of the new policy */
e041c683 1964 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1965 CPUFREQ_NOTIFY, new_policy);
1da177e4 1966
3a3e9e06
VK
1967 policy->min = new_policy->min;
1968 policy->max = new_policy->max;
1da177e4 1969
2d06d8c4 1970 pr_debug("new min and max freqs are %u - %u kHz\n",
3a3e9e06 1971 policy->min, policy->max);
1da177e4 1972
1c3d85dd 1973 if (cpufreq_driver->setpolicy) {
3a3e9e06 1974 policy->policy = new_policy->policy;
2d06d8c4 1975 pr_debug("setting range\n");
3a3e9e06 1976 ret = cpufreq_driver->setpolicy(new_policy);
1da177e4 1977 } else {
3a3e9e06 1978 if (new_policy->governor != policy->governor) {
1da177e4 1979 /* save old, working values */
3a3e9e06 1980 struct cpufreq_governor *old_gov = policy->governor;
1da177e4 1981
2d06d8c4 1982 pr_debug("governor switch\n");
1da177e4
LT
1983
1984 /* end old governor */
3a3e9e06
VK
1985 if (policy->governor) {
1986 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
ad7722da 1987 up_write(&policy->rwsem);
3a3e9e06 1988 __cpufreq_governor(policy,
7bd353a9 1989 CPUFREQ_GOV_POLICY_EXIT);
ad7722da 1990 down_write(&policy->rwsem);
7bd353a9 1991 }
1da177e4
LT
1992
1993 /* start new governor */
3a3e9e06
VK
1994 policy->governor = new_policy->governor;
1995 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1996 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
7bd353a9 1997 failed = 0;
955ef483 1998 } else {
ad7722da 1999 up_write(&policy->rwsem);
3a3e9e06 2000 __cpufreq_governor(policy,
7bd353a9 2001 CPUFREQ_GOV_POLICY_EXIT);
ad7722da 2002 down_write(&policy->rwsem);
955ef483 2003 }
7bd353a9
VK
2004 }
2005
2006 if (failed) {
1da177e4 2007 /* new governor failed, so re-start old one */
2d06d8c4 2008 pr_debug("starting governor %s failed\n",
3a3e9e06 2009 policy->governor->name);
1da177e4 2010 if (old_gov) {
3a3e9e06
VK
2011 policy->governor = old_gov;
2012 __cpufreq_governor(policy,
7bd353a9 2013 CPUFREQ_GOV_POLICY_INIT);
3a3e9e06 2014 __cpufreq_governor(policy,
e08f5f5b 2015 CPUFREQ_GOV_START);
1da177e4
LT
2016 }
2017 ret = -EINVAL;
2018 goto error_out;
2019 }
2020 /* might be a policy change, too, so fall through */
2021 }
2d06d8c4 2022 pr_debug("governor: change or update limits\n");
3de9bdeb 2023 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1da177e4
LT
2024 }
2025
7d5e350f 2026error_out:
1da177e4
LT
2027 return ret;
2028}
2029
1da177e4
LT
2030/**
2031 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2032 * @cpu: CPU which shall be re-evaluated
2033 *
25985edc 2034 * Useful for policy notifiers which have different necessities
1da177e4
LT
2035 * at different times.
2036 */
2037int cpufreq_update_policy(unsigned int cpu)
2038{
3a3e9e06
VK
2039 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2040 struct cpufreq_policy new_policy;
f1829e4a 2041 int ret;
1da177e4 2042
3a3e9e06 2043 if (!policy) {
f1829e4a
JL
2044 ret = -ENODEV;
2045 goto no_policy;
2046 }
1da177e4 2047
ad7722da 2048 down_write(&policy->rwsem);
1da177e4 2049
2d06d8c4 2050 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 2051 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
2052 new_policy.min = policy->user_policy.min;
2053 new_policy.max = policy->user_policy.max;
2054 new_policy.policy = policy->user_policy.policy;
2055 new_policy.governor = policy->user_policy.governor;
1da177e4 2056
bb176f7d
VK
2057 /*
2058 * BIOS might change freq behind our back
2059 * -> ask driver for current freq and notify governors about a change
2060 */
1c3d85dd 2061 if (cpufreq_driver->get) {
3a3e9e06
VK
2062 new_policy.cur = cpufreq_driver->get(cpu);
2063 if (!policy->cur) {
2d06d8c4 2064 pr_debug("Driver did not initialize current freq");
3a3e9e06 2065 policy->cur = new_policy.cur;
a85f7bd3 2066 } else {
9c0ebcf7 2067 if (policy->cur != new_policy.cur && has_target())
3a3e9e06
VK
2068 cpufreq_out_of_sync(cpu, policy->cur,
2069 new_policy.cur);
a85f7bd3 2070 }
0961dd0d
TR
2071 }
2072
037ce839 2073 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2074
ad7722da 2075 up_write(&policy->rwsem);
5a01f2e8 2076
3a3e9e06 2077 cpufreq_cpu_put(policy);
f1829e4a 2078no_policy:
1da177e4
LT
2079 return ret;
2080}
2081EXPORT_SYMBOL(cpufreq_update_policy);
2082
2760984f 2083static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2084 unsigned long action, void *hcpu)
2085{
2086 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2087 struct device *dev;
5302c3fb 2088 bool frozen = false;
c32b6b8e 2089
8a25a2fd
KS
2090 dev = get_cpu_device(cpu);
2091 if (dev) {
5302c3fb 2092
d4faadd5
RW
2093 if (action & CPU_TASKS_FROZEN)
2094 frozen = true;
2095
5302c3fb 2096 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2097 case CPU_ONLINE:
5302c3fb 2098 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2099 cpufreq_update_policy(cpu);
c32b6b8e 2100 break;
5302c3fb 2101
c32b6b8e 2102 case CPU_DOWN_PREPARE:
cedb70af 2103 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
1aee40ac
SB
2104 break;
2105
2106 case CPU_POST_DEAD:
cedb70af 2107 __cpufreq_remove_dev_finish(dev, NULL, frozen);
c32b6b8e 2108 break;
5302c3fb 2109
5a01f2e8 2110 case CPU_DOWN_FAILED:
5302c3fb 2111 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2112 break;
2113 }
2114 }
2115 return NOTIFY_OK;
2116}
2117
9c36f746 2118static struct notifier_block __refdata cpufreq_cpu_notifier = {
bb176f7d 2119 .notifier_call = cpufreq_cpu_callback,
c32b6b8e 2120};
1da177e4
LT
2121
/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/
2125
2126/**
2127 * cpufreq_register_driver - register a CPU Frequency driver
2128 * @driver_data: A struct cpufreq_driver containing the values#
2129 * submitted by the CPU Frequency driver.
2130 *
bb176f7d 2131 * Registers a CPU Frequency driver to this core code. This code
1da177e4 2132 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 2133 * (and isn't unregistered in the meantime).
1da177e4
LT
2134 *
2135 */
221dee28 2136int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
2137{
2138 unsigned long flags;
2139 int ret;
2140
a7b422cd
KRW
2141 if (cpufreq_disabled())
2142 return -ENODEV;
2143
1da177e4 2144 if (!driver_data || !driver_data->verify || !driver_data->init ||
9c0ebcf7
VK
2145 !(driver_data->setpolicy || driver_data->target_index ||
2146 driver_data->target))
1da177e4
LT
2147 return -EINVAL;
2148
2d06d8c4 2149 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
2150
2151 if (driver_data->setpolicy)
2152 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2153
0d1857a1 2154 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2155 if (cpufreq_driver) {
0d1857a1 2156 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4dea5806 2157 return -EEXIST;
1da177e4 2158 }
1c3d85dd 2159 cpufreq_driver = driver_data;
0d1857a1 2160 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 2161
8a25a2fd 2162 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
2163 if (ret)
2164 goto err_null_driver;
1da177e4 2165
1c3d85dd 2166 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
2167 int i;
2168 ret = -ENODEV;
2169
2170 /* check for at least one working CPU */
7a6aedfa
MT
2171 for (i = 0; i < nr_cpu_ids; i++)
2172 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 2173 ret = 0;
7a6aedfa
MT
2174 break;
2175 }
1da177e4
LT
2176
2177 /* if all ->init() calls failed, unregister */
2178 if (ret) {
2d06d8c4 2179 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 2180 driver_data->name);
8a25a2fd 2181 goto err_if_unreg;
1da177e4
LT
2182 }
2183 }
2184
8f5bc2ab 2185 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 2186 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 2187
8f5bc2ab 2188 return 0;
8a25a2fd
KS
2189err_if_unreg:
2190 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab 2191err_null_driver:
0d1857a1 2192 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2193 cpufreq_driver = NULL;
0d1857a1 2194 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 2195 return ret;
1da177e4
LT
2196}
2197EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2198
1da177e4
LT
2199/**
2200 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2201 *
bb176f7d 2202 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2203 * the right to do so, i.e. if you have succeeded in initialising before!
2204 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2205 * currently not initialised.
2206 */
221dee28 2207int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2208{
2209 unsigned long flags;
2210
1c3d85dd 2211 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2212 return -EINVAL;
1da177e4 2213
2d06d8c4 2214 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2215
8a25a2fd 2216 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2217 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2218
6eed9404 2219 down_write(&cpufreq_rwsem);
0d1857a1 2220 write_lock_irqsave(&cpufreq_driver_lock, flags);
6eed9404 2221
1c3d85dd 2222 cpufreq_driver = NULL;
6eed9404 2223
0d1857a1 2224 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
6eed9404 2225 up_write(&cpufreq_rwsem);
1da177e4
LT
2226
2227 return 0;
2228}
2229EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2230
2231static int __init cpufreq_core_init(void)
2232{
a7b422cd
KRW
2233 if (cpufreq_disabled())
2234 return -ENODEV;
2235
2361be23 2236 cpufreq_global_kobject = kobject_create();
8aa84ad8 2237 BUG_ON(!cpufreq_global_kobject);
e00e56df 2238 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2239
5a01f2e8
VP
2240 return 0;
2241}
5a01f2e8 2242core_initcall(cpufreq_core_init);