cpufreq: Mark ARM drivers with CPUFREQ_NEED_INITIAL_FREQ_CHECK flag
[linux-2.6-block.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
e00e56df 29#include <linux/syscore_ops.h>
5ff0a268 30#include <linux/tick.h>
6f4f2723
TR
31#include <trace/events/power.h>
32
1da177e4 33/**
cd878479 34 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
1c3d85dd 38static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 39static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d 41static DEFINE_RWLOCK(cpufreq_driver_lock);
6f1e4efd 42DEFINE_MUTEX(cpufreq_governor_lock);
c88a1f8b 43static LIST_HEAD(cpufreq_policy_list);
bb176f7d 44
084f3493
TR
45#ifdef CONFIG_HOTPLUG_CPU
46/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 47static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 48#endif
1da177e4 49
9c0ebcf7
VK
50static inline bool has_target(void)
51{
52 return cpufreq_driver->target_index || cpufreq_driver->target;
53}
54
6eed9404
VK
55/*
56 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
57 * sections
58 */
59static DECLARE_RWSEM(cpufreq_rwsem);
60
1da177e4 61/* internal prototypes */
29464f28
DJ
62static int __cpufreq_governor(struct cpufreq_policy *policy,
63 unsigned int event);
5a01f2e8 64static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 65static void handle_update(struct work_struct *work);
1da177e4
LT
66
67/**
32ee8c3e
DJ
68 * Two notifier lists: the "policy" list is involved in the
69 * validation process for a new CPU frequency policy; the
1da177e4
LT
70 * "transition" list for kernel code that needs to handle
71 * changes to devices when the CPU clock speed changes.
72 * The mutex locks both lists.
73 */
e041c683 74static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 75static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 76
/* Set once the SRCU transition notifier head has been initialized, so that
 * cpufreq_register_notifier() can refuse registrations made too early. */
static bool init_cpufreq_transition_notifier_list_called;

/* One-time init of the SRCU notifier head used for frequency-transition
 * notifications; runs as a pure_initcall, i.e. before any driver code. */
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 85
/* Non-zero once disable_cpufreq() has been called; checked by most entry
 * points to turn the whole subsystem into a no-op. */
static int off __read_mostly;

/* Returns non-zero when cpufreq has been globally disabled. */
static int cpufreq_disabled(void)
{
	return off;
}
/* Globally disable cpufreq; there is deliberately no way to re-enable it. */
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 95static LIST_HEAD(cpufreq_governor_list);
29464f28 96static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 97
4d5dcc42
VK
98bool have_governor_per_policy(void)
99{
0b981e70 100 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 101}
3f869d6d 102EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 103
944e9a03
VK
104struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
105{
106 if (have_governor_per_policy())
107 return &policy->kobj;
108 else
109 return cpufreq_global_kobject;
110}
111EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
112
72a4ce34
VK
/*
 * Jiffies-based fallback for computing a CPU's idle time: idle is defined as
 * wall time minus the sum of all accounted busy states.  Used when the
 * nohz/ktime based accounting is unavailable (see get_cpu_idle_time()).
 * Returns idle time in microseconds; if @wall is non-NULL, also stores the
 * wall time in microseconds there.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	/* Sum every "busy" cputime bucket; whatever is left is idle. */
	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
134
135u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
136{
137 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
138
139 if (idle_time == -1ULL)
140 return get_cpu_idle_time_jiffy(cpu, wall);
141 else if (!io_busy)
142 idle_time += get_cpu_iowait_time_us(cpu, wall);
143
144 return idle_time;
145}
146EXPORT_SYMBOL_GPL(get_cpu_idle_time);
147
70e9e778
VK
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do following:
 * - validate & show freq table passed
 * - set policies transition latency
 * - policy->cpus with all possible CPUs
 *
 * Returns 0 on success or the negative error code from table validation.
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all
	 * processors share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
178
/*
 * Look up the policy for @cpu and return it with two references held: a
 * kobject reference on the policy and a read lock on cpufreq_rwsem (which
 * prevents the driver module from unloading).  Both are dropped by
 * cpufreq_cpu_put().  Returns NULL if cpufreq is disabled, @cpu is out of
 * range, no driver is registered, or no policy exists for @cpu.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
		return NULL;

	/* trylock: failure means the driver is being unregistered */
	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* on failure, release the rwsem taken above; on success the caller
	 * keeps it until cpufreq_cpu_put() */
	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
208
/*
 * Drop the references taken by cpufreq_cpu_get(): the policy's kobject
 * reference and the read lock on cpufreq_rwsem.
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	if (cpufreq_disabled())
		return;

	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
218
1da177e4
LT
219/*********************************************************************
220 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
221 *********************************************************************/
222
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was measured at; captured
 * lazily on the first non-CONST_LOOPS transition. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Driver guarantees delay loops are frequency-invariant. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale after the change has happened (or on suspend/resume). */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled by the architecture; nothing to
 * do here. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
260
/*
 * Notify one CPU's transition to the registered listeners and fix up
 * loops_per_jiffy.  Called twice per change: once with CPUFREQ_PRECHANGE
 * and once with CPUFREQ_POSTCHANGE (see cpufreq_notify_transition()).
 * Must run in process context (notifier chain may sleep).
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's view over the driver's */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* only now does the core's idea of the current freq change */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
bb176f7d 306
b43a7ffb
VK
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.  Notifies once per CPU in the policy; freqs->cpu is
 * rewritten by the loop for each notification.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
322
f7ba3b41
VK
323/* Do post notifications when there are chances that transition has failed */
324void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
325 struct cpufreq_freqs *freqs, int transition_failed)
326{
327 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
328 if (!transition_failed)
329 return;
330
331 swap(freqs->old, freqs->new);
332 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
333 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
334}
335EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
336
1da177e4 337
1da177e4
LT
338/*********************************************************************
339 * SYSFS INTERFACE *
340 *********************************************************************/
341
3bcb09a3
JF
342static struct cpufreq_governor *__find_governor(const char *str_governor)
343{
344 struct cpufreq_governor *t;
345
346 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 347 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
348 return t;
349
350 return NULL;
351}
352
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers, maps "performance"/"powersave" onto *@policy.
 * For target drivers, looks up (and if needed modprobes) the named
 * governor and stores it in *@governor.  Returns 0 on success, -EINVAL
 * if the string matches nothing or no driver is registered.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (has_target()) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* drop the mutex: request_module() may sleep and the
			 * loaded module registers under this same mutex */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4 401
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

/* Generates a sysfs show handler printing one unsigned int policy member. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
423
037ce839 424static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 425 struct cpufreq_policy *new_policy);
7970e08b 426
1da177e4
LT
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Generates a store handler that parses one unsigned int, applies it via
 * cpufreq_set_policy() on a copy of the current policy, and records the
 * user's request in policy->user_policy.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
453
454/**
455 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
456 */
905d77cd
DJ
457static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
458 char *buf)
1da177e4 459{
5a01f2e8 460 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
461 if (!cur_freq)
462 return sprintf(buf, "<unknown>");
463 return sprintf(buf, "%u\n", cur_freq);
464}
465
1da177e4
LT
/**
 * show_scaling_governor - show the current policy for the specified CPU
 *
 * For setpolicy drivers prints the fixed policy name; for governor-based
 * drivers prints the active governor's name.
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
480
1da177e4
LT
/**
 * store_scaling_governor - store policy for the specified CPU
 *
 * Parses a governor (or policy) name from @buf and applies it via
 * cpufreq_set_policy().  user_policy is updated even on failure, mirroring
 * whatever cpufreq_set_policy() left in the policy.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s: bounded read into the 16-byte buffer */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
513
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
521
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * For setpolicy drivers the choices are the two fixed policies; otherwise
 * the registered governor list is printed, truncated so each entry plus
 * trailing space and newline always fits in one page.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* stop early if another name might not fit in the page */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 546
/*
 * Print the CPU numbers in @mask as a space-separated, newline-terminated
 * list into @buf (one sysfs page).  Stops early if the page would overflow.
 * Returns the number of bytes written.
 */
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		/* leave room for the final separator and newline */
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 563
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}
572
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
580
/*
 * Forward a user-requested frequency to the governor's store_setspeed hook
 * (only meaningful for governors like "userspace" that implement it).
 */
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
598
599static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
600{
879000f9 601 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
602 return sprintf(buf, "<unsupported>\n");
603
604 return policy->governor->show_setspeed(policy, buf);
605}
1da177e4 606
e2f74f35 607/**
8bf1ac72 608 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
609 */
610static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
611{
612 unsigned int limit;
613 int ret;
1c3d85dd
RW
614 if (cpufreq_driver->bios_limit) {
615 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
616 if (!ret)
617 return sprintf(buf, "%u\n", limit);
618 }
619 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
620}
621
6dad2a29
BP
622cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
623cpufreq_freq_attr_ro(cpuinfo_min_freq);
624cpufreq_freq_attr_ro(cpuinfo_max_freq);
625cpufreq_freq_attr_ro(cpuinfo_transition_latency);
626cpufreq_freq_attr_ro(scaling_available_governors);
627cpufreq_freq_attr_ro(scaling_driver);
628cpufreq_freq_attr_ro(scaling_cur_freq);
629cpufreq_freq_attr_ro(bios_limit);
630cpufreq_freq_attr_ro(related_cpus);
631cpufreq_freq_attr_ro(affected_cpus);
632cpufreq_freq_attr_rw(scaling_min_freq);
633cpufreq_freq_attr_rw(scaling_max_freq);
634cpufreq_freq_attr_rw(scaling_governor);
635cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 636
905d77cd 637static struct attribute *default_attrs[] = {
1da177e4
LT
638 &cpuinfo_min_freq.attr,
639 &cpuinfo_max_freq.attr,
ed129784 640 &cpuinfo_transition_latency.attr,
1da177e4
LT
641 &scaling_min_freq.attr,
642 &scaling_max_freq.attr,
643 &affected_cpus.attr,
e8628dd0 644 &related_cpus.attr,
1da177e4
LT
645 &scaling_governor.attr,
646 &scaling_driver.attr,
647 &scaling_available_governors.attr,
9e76988e 648 &scaling_setspeed.attr,
1da177e4
LT
649 NULL
650};
651
29464f28
DJ
652#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
653#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 654
/*
 * Generic sysfs read dispatcher for per-policy attributes: pins the driver
 * module (cpufreq_rwsem), takes the policy's rwsem for reading, then calls
 * the attribute's show handler.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	/* trylock: -EINVAL if the driver is going away */
	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
676
905d77cd
DJ
/*
 * Generic sysfs write dispatcher for per-policy attributes.  Blocks CPU
 * hotplug (get_online_cpus) while writing, rejects writes for offline CPUs,
 * pins the driver module, and takes the policy's rwsem for writing around
 * the attribute's store handler.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
707
/* kobject release callback: signals waiters (see cpufreq_policy_put_kobj)
 * that the policy's last sysfs reference is gone. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
714
52cf25d0 715static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
716 .show = show,
717 .store = store,
718};
719
720static struct kobj_type ktype_cpufreq = {
721 .sysfs_ops = &sysfs_ops,
722 .default_attrs = default_attrs,
723 .release = cpufreq_sysfs_release,
724};
725
2361be23
VK
726struct kobject *cpufreq_global_kobject;
727EXPORT_SYMBOL(cpufreq_global_kobject);
728
/* Usage count for the global /sys/devices/system/cpu/cpufreq kobject;
 * the kobject is added on the first get and deleted on the last put. */
static int cpufreq_global_kobject_usage;

/* Take a usage reference on the global kobject, adding it to sysfs on the
 * 0 -> 1 transition.  Returns kobject_add()'s result or 0. */
int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);
740
/* Drop a usage reference on the global kobject; removes it from sysfs on
 * the 1 -> 0 transition. */
void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);
747
/* Create @attr under the global cpufreq kobject, taking a usage reference
 * on it; the reference is released again if file creation fails. */
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);
761
/* Remove @attr from the global cpufreq kobject and drop the usage
 * reference taken by cpufreq_sysfs_create_file(). */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
768
/* symlink affected CPUs: create a "cpufreq" link from each sibling CPU's
 * device directory to the owning policy's kobject.  The policy's own CPU
 * is skipped (it holds the real directory).  Returns the first failing
 * sysfs_create_link() result, or 0. */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
790
/*
 * Create the sysfs interface for a new policy: the policy kobject itself,
 * the driver's own attributes, the conditional core attributes
 * (cpuinfo_cur_freq / scaling_cur_freq / bios_limit), and the symlinks for
 * sibling CPUs.  On any failure the kobject is dropped and we wait for its
 * release before returning the error.
 */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only makes sense if the driver can read hardware */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (has_target()) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
838
/*
 * Apply the initial policy for a newly created cpufreq_policy, starting
 * from a copy of its current settings.  If setting the policy fails, the
 * driver's exit() hook is invoked to undo its init().
 */
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Use the default policy if its valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(policy->governor->name,
					&new_policy.policy, NULL);

	/* assure that the starting sequence is run in cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
862
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach a hotplugged @cpu to an existing @policy shared with its siblings:
 * stop the governor, publish the CPU in policy->cpus and the per-cpu policy
 * pointer (under the driver lock), restart the governor, and create the
 * sysfs symlink for the new CPU's device.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		/* restart and re-evaluate limits with the new CPU included */
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
			(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
1da177e4 899
8414809c
SB
/*
 * Fetch the policy saved in the fallback slot for @cpu (stashed there during
 * a suspend-time light-weight teardown); NULL if there is none.
 */
static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
913
e9698cc5
SB
/*
 * Allocate and minimally initialize a cpufreq_policy (cpumasks, list head,
 * rwsem).  Returns NULL on allocation failure; partial allocations are
 * unwound via the goto ladder.
 */
static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
940
42f921a6
VK
/*
 * Drop the policy's sysfs kobject and block until its release callback has
 * run, guaranteeing no sysfs user still references the policy.
 */
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	/* snapshot under the rwsem, then put outside it */
	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
961
e9698cc5
SB
962static void cpufreq_policy_free(struct cpufreq_policy *policy)
963{
964 free_cpumask_var(policy->related_cpus);
965 free_cpumask_var(policy->cpus);
966 kfree(policy);
967}
968
0d66b91e
SB
/*
 * Re-home @policy onto a new managing @cpu (e.g. when the old one goes
 * offline), recording the previous owner in last_cpu, updating the
 * frequency table bookkeeping, and notifying policy listeners.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	/* caller bug: re-homing onto the CPU that already owns the policy */
	if (WARN_ON(cpu == policy->cpu))
		return;

	down_write(&policy->rwsem);

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	up_write(&policy->rwsem);

	cpufreq_frequency_table_update_policy_cpu(policy);
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
985
a82fab29
SB
/*
 * __cpufreq_add_dev - bring up cpufreq management for a CPU device.
 * @dev: CPU device being added
 * @sif: subsystem interface (unused here, part of the callback signature)
 * @frozen: true for the light-weight resume path that restores a policy
 *	saved by __cpufreq_remove_dev_prepare() instead of allocating one
 *
 * Returns 0 on success (including the benign "nothing to do" cases) or a
 * negative errno. On failure all partially-initialized state is unwound
 * via the error-label ladder at the bottom.
 */
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
	struct cpufreq_governor *gov;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	/* Bail out quietly if the driver is being unregistered. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			/* Join the sibling's existing policy instead of
			 * creating a new one. */
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		frozen = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (frozen && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);
	else
		policy->cpu = cpu;

	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	if (cpufreq_driver->get) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!frozen) {
		/* Remember the limits chosen by ->init() as the user policy. */
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* Re-install the governor this CPU used before it was unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	/* Publish the policy for all CPUs it covers. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!frozen) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

err_get_freq:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (frozen) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
1153
a82fab29
SB
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	/* Full (non-frozen) initialization path. */
	return __cpufreq_add_dev(dev, sif, false);
}
1167
3a3e9e06 1168static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
42f921a6 1169 unsigned int old_cpu)
f9ba680d
SB
1170{
1171 struct device *cpu_dev;
f9ba680d
SB
1172 int ret;
1173
1174 /* first sibling now owns the new sysfs dir */
9c8f1ee4 1175 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
a82fab29 1176
f9ba680d 1177 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
3a3e9e06 1178 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
f9ba680d
SB
1179 if (ret) {
1180 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1181
ad7722da 1182 down_write(&policy->rwsem);
3a3e9e06 1183 cpumask_set_cpu(old_cpu, policy->cpus);
ad7722da 1184 up_write(&policy->rwsem);
f9ba680d 1185
3a3e9e06 1186 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
f9ba680d
SB
1187 "cpufreq");
1188
1189 return -EINVAL;
1190 }
1191
1192 return cpu_dev->id;
1193}
1194
cedb70af
SB
/*
 * __cpufreq_remove_dev_prepare - first half of CPU removal.
 * @dev: CPU device going away
 * @sif: subsystem interface (unused)
 * @frozen: true for light-weight teardown (suspend path); the policy is
 *	stashed in cpufreq_cpu_data_fallback for later restore
 *
 * Stops the governor, remembers the governor name for a later re-plug and,
 * if the departing CPU owns a multi-CPU policy, nominates a sibling as the
 * new owner. The per-cpu data is cleared later, in
 * __cpufreq_remove_dev_finish().
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored on re-plug. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
#endif

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		/* Non-owner CPUs only have a symlink to remove. */
		if (!frozen)
			sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Owner is leaving but siblings remain: move ownership. */
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
					 __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}
1256
/*
 * __cpufreq_remove_dev_finish - second half of CPU removal.
 * @dev: CPU device going away
 * @sif: subsystem interface (unused)
 * @frozen: true for light-weight teardown; the policy object and its kobject
 *	are kept around for the matching light-weight ->init()
 *
 * Drops the CPU from the policy mask. If it was the last user, exits the
 * governor, tears down the kobject (full teardown only), calls the driver's
 * ->exit() and unlinks/frees the policy; otherwise restarts the governor on
 * the remaining CPUs. Must be called after __cpufreq_remove_dev_prepare().
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!frozen)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		/* Siblings remain: restart the governor on them. */
		if (has_target()) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
			    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
				       __func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
1326
cedb70af 1327/**
27a862e9 1328 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1329 *
1330 * Removes the cpufreq interface for a CPU device.
cedb70af 1331 */
8a25a2fd 1332static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1333{
8a25a2fd 1334 unsigned int cpu = dev->id;
27a862e9 1335 int ret;
ec28297a
VP
1336
1337 if (cpu_is_offline(cpu))
1338 return 0;
1339
27a862e9
VK
1340 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1341
1342 if (!ret)
1343 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1344
1345 return ret;
5a01f2e8
VP
1346}
1347
/* Work callback scheduled from policy->update; re-evaluates the policy for
 * the owning CPU (used e.g. after an out-of-sync frequency is detected). */
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
1356
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	/* Fake a transition from the stale value to the real one so that
	 * all transition notifiers (loops_per_jiffy etc.) catch up. */
	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
1387
32ee8c3e 1388/**
4ab70df4 1389 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1390 * @cpu: CPU number
1391 *
1392 * This is the last known freq, without actually getting it from the driver.
1393 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1394 */
1395unsigned int cpufreq_quick_get(unsigned int cpu)
1396{
9e21ba8b 1397 struct cpufreq_policy *policy;
e08f5f5b 1398 unsigned int ret_freq = 0;
95235ca2 1399
1c3d85dd
RW
1400 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1401 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1402
1403 policy = cpufreq_cpu_get(cpu);
95235ca2 1404 if (policy) {
e08f5f5b 1405 ret_freq = policy->cur;
95235ca2
VP
1406 cpufreq_cpu_put(policy);
1407 }
1408
4d34a67d 1409 return ret_freq;
95235ca2
VP
1410}
1411EXPORT_SYMBOL(cpufreq_quick_get);
1412
3d737108
JB
1413/**
1414 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1415 * @cpu: CPU number
1416 *
1417 * Just return the max possible frequency for a given CPU.
1418 */
1419unsigned int cpufreq_quick_get_max(unsigned int cpu)
1420{
1421 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1422 unsigned int ret_freq = 0;
1423
1424 if (policy) {
1425 ret_freq = policy->max;
1426 cpufreq_cpu_put(policy);
1427 }
1428
1429 return ret_freq;
1430}
1431EXPORT_SYMBOL(cpufreq_quick_get_max);
1432
/*
 * __cpufreq_get - read the current frequency from the driver.
 * @cpu: CPU number
 *
 * Caller must hold the policy rwsem (see cpufreq_get()). Returns 0 if the
 * driver has no ->get callback. If the hardware frequency disagrees with
 * policy->cur, triggers the out-of-sync fixup and schedules a policy update.
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	/* CPUFREQ_CONST_LOOPS drivers don't need the consistency check. */
	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1455
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	/* NOTE(review): -ENOENT is returned through an unsigned type, so
	 * callers see a huge positive value rather than a negative errno. */
	if (cpufreq_disabled() || !cpufreq_driver)
		return -ENOENT;

	BUG_ON(!policy);

	/* Driver is being torn down - report 0 rather than block. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	down_read(&policy->rwsem);

	ret_freq = __cpufreq_get(cpu);

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1485
8a25a2fd
KS
/* Hooks cpufreq into the CPU subsystem: called for each CPU device as it is
 * registered/unregistered (including hotplug). */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1492
42d4dc3f 1493/**
e00e56df
RW
1494 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1495 *
1496 * This function is only executed for the boot processor. The other CPUs
1497 * have been put offline by means of CPU hotplug.
42d4dc3f 1498 */
e00e56df 1499static int cpufreq_bp_suspend(void)
42d4dc3f 1500{
e08f5f5b 1501 int ret = 0;
4bc5d341 1502
e00e56df 1503 int cpu = smp_processor_id();
3a3e9e06 1504 struct cpufreq_policy *policy;
42d4dc3f 1505
2d06d8c4 1506 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1507
e00e56df 1508 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1509 policy = cpufreq_cpu_get(cpu);
1510 if (!policy)
e00e56df 1511 return 0;
42d4dc3f 1512
1c3d85dd 1513 if (cpufreq_driver->suspend) {
3a3e9e06 1514 ret = cpufreq_driver->suspend(policy);
ce6c3997 1515 if (ret)
42d4dc3f 1516 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
3a3e9e06 1517 "step on CPU %u\n", policy->cpu);
42d4dc3f
BH
1518 }
1519
3a3e9e06 1520 cpufreq_cpu_put(policy);
c9060494 1521 return ret;
42d4dc3f
BH
1522}
1523
1da177e4 1524/**
e00e56df 1525 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1526 *
1527 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1528 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1529 * restored. It will verify that the current freq is in sync with
1530 * what we believe it to be. This is a bit later than when it
1531 * should be, but nonethteless it's better than calling
1532 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1533 *
1534 * This function is only executed for the boot CPU. The other CPUs have not
1535 * been turned on yet.
1da177e4 1536 */
e00e56df 1537static void cpufreq_bp_resume(void)
1da177e4 1538{
e08f5f5b 1539 int ret = 0;
4bc5d341 1540
e00e56df 1541 int cpu = smp_processor_id();
3a3e9e06 1542 struct cpufreq_policy *policy;
1da177e4 1543
2d06d8c4 1544 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1545
e00e56df 1546 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1547 policy = cpufreq_cpu_get(cpu);
1548 if (!policy)
e00e56df 1549 return;
1da177e4 1550
1c3d85dd 1551 if (cpufreq_driver->resume) {
3a3e9e06 1552 ret = cpufreq_driver->resume(policy);
1da177e4
LT
1553 if (ret) {
1554 printk(KERN_ERR "cpufreq: resume failed in ->resume "
3a3e9e06 1555 "step on CPU %u\n", policy->cpu);
c9060494 1556 goto fail;
1da177e4
LT
1557 }
1558 }
1559
3a3e9e06 1560 schedule_work(&policy->update);
ce6c3997 1561
c9060494 1562fail:
3a3e9e06 1563 cpufreq_cpu_put(policy);
1da177e4
LT
1564}
1565
e00e56df
RW
/* Syscore ops run on the boot CPU with interrupts disabled, after/before all
 * other CPUs are offline - see cpufreq_bp_suspend()/cpufreq_bp_resume(). */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1570
9d95046e
BP
1571/**
1572 * cpufreq_get_current_driver - return current driver's name
1573 *
1574 * Return the name string of the currently loaded cpufreq driver
1575 * or NULL, if none.
1576 */
1577const char *cpufreq_get_current_driver(void)
1578{
1c3d85dd
RW
1579 if (cpufreq_driver)
1580 return cpufreq_driver->name;
1581
1582 return NULL;
9d95046e
BP
1583}
1584EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1585
1586/*********************************************************************
1587 * NOTIFIER LISTS INTERFACE *
1588 *********************************************************************/
1589
1590/**
1591 * cpufreq_register_notifier - register a driver with cpufreq
1592 * @nb: notifier function to register
1593 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1594 *
32ee8c3e 1595 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1596 * are notified about clock rate changes (once before and once after
1597 * the transition), or a list of drivers that are notified about
1598 * changes in cpufreq policy.
1599 *
1600 * This function may sleep, and has the same return conditions as
e041c683 1601 * blocking_notifier_chain_register.
1da177e4
LT
1602 */
1603int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1604{
1605 int ret;
1606
d5aaffa9
DB
1607 if (cpufreq_disabled())
1608 return -EINVAL;
1609
74212ca4
CEB
1610 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1611
1da177e4
LT
1612 switch (list) {
1613 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1614 ret = srcu_notifier_chain_register(
e041c683 1615 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1616 break;
1617 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1618 ret = blocking_notifier_chain_register(
1619 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1620 break;
1621 default:
1622 ret = -EINVAL;
1623 }
1da177e4
LT
1624
1625 return ret;
1626}
1627EXPORT_SYMBOL(cpufreq_register_notifier);
1628
1da177e4
LT
1629/**
1630 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1631 * @nb: notifier block to be unregistered
bb176f7d 1632 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1633 *
1634 * Remove a driver from the CPU frequency notifier list.
1635 *
1636 * This function may sleep, and has the same return conditions as
e041c683 1637 * blocking_notifier_chain_unregister.
1da177e4
LT
1638 */
1639int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1640{
1641 int ret;
1642
d5aaffa9
DB
1643 if (cpufreq_disabled())
1644 return -EINVAL;
1645
1da177e4
LT
1646 switch (list) {
1647 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1648 ret = srcu_notifier_chain_unregister(
e041c683 1649 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1650 break;
1651 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1652 ret = blocking_notifier_chain_unregister(
1653 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1654 break;
1655 default:
1656 ret = -EINVAL;
1657 }
1da177e4
LT
1658
1659 return ret;
1660}
1661EXPORT_SYMBOL(cpufreq_unregister_notifier);
1662
1663
1664/*********************************************************************
1665 * GOVERNORS *
1666 *********************************************************************/
1667
1da177e4
LT
/*
 * __cpufreq_driver_target - set a new target frequency (lock already held).
 * @policy: policy to act on (caller holds policy->rwsem)
 * @target_freq: requested frequency in kHz, clamped to policy->min/max
 * @relation: CPUFREQ_RELATION_* rounding hint
 *
 * Dispatches to the driver's ->target() or, for ->target_index() drivers,
 * resolves the frequency table index and wraps the switch in PRE/POST
 * transition notifications (unless the driver does its own async ones).
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		struct cpufreq_freqs freqs;
		bool notify;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		/* Resolved to the current frequency - nothing to do. */
		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		/* Drivers with ASYNC_NOTIFICATION notify on their own. */
		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);

		if (notify) {
			freqs.old = policy->cur;
			freqs.new = freq_table[index].frequency;
			freqs.flags = 0;

			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
					__func__, policy->cpu, freqs.old,
					freqs.new);

			cpufreq_notify_transition(policy, &freqs,
					CPUFREQ_PRECHANGE);
		}

		retval = cpufreq_driver->target_index(policy, index);
		if (retval)
			pr_err("%s: Failed to change cpu frequency: %d\n",
					__func__, retval);

		/* POSTCHANGE (or rollback on failure) for the PRECHANGE
		 * sent above. */
		if (notify)
			cpufreq_notify_post_transition(policy, &freqs, retval);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1750
1da177e4
LT
1751int cpufreq_driver_target(struct cpufreq_policy *policy,
1752 unsigned int target_freq,
1753 unsigned int relation)
1754{
f1829e4a 1755 int ret = -EINVAL;
1da177e4 1756
ad7722da 1757 down_write(&policy->rwsem);
1da177e4
LT
1758
1759 ret = __cpufreq_driver_target(policy, target_freq, relation);
1760
ad7722da 1761 up_write(&policy->rwsem);
1da177e4 1762
1da177e4
LT
1763 return ret;
1764}
1765EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1766
153d7f3f 1767/*
153d7f3f
AV
1768 * when "event" is CPUFREQ_GOV_LIMITS
1769 */
1da177e4 1770
e08f5f5b
GS
1771static int __cpufreq_governor(struct cpufreq_policy *policy,
1772 unsigned int event)
1da177e4 1773{
cc993cab 1774 int ret;
6afde10c
TR
1775
1776 /* Only must be defined when default governor is known to have latency
1777 restrictions, like e.g. conservative or ondemand.
1778 That this is the case is already ensured in Kconfig
1779 */
1780#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1781 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1782#else
1783 struct cpufreq_governor *gov = NULL;
1784#endif
1c256245
TR
1785
1786 if (policy->governor->max_transition_latency &&
1787 policy->cpuinfo.transition_latency >
1788 policy->governor->max_transition_latency) {
6afde10c
TR
1789 if (!gov)
1790 return -EINVAL;
1791 else {
1792 printk(KERN_WARNING "%s governor failed, too long"
1793 " transition latency of HW, fallback"
1794 " to %s governor\n",
1795 policy->governor->name,
1796 gov->name);
1797 policy->governor = gov;
1798 }
1c256245 1799 }
1da177e4 1800
fe492f3f
VK
1801 if (event == CPUFREQ_GOV_POLICY_INIT)
1802 if (!try_module_get(policy->governor->owner))
1803 return -EINVAL;
1da177e4 1804
2d06d8c4 1805 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1806 policy->cpu, event);
95731ebb
XC
1807
1808 mutex_lock(&cpufreq_governor_lock);
56d07db2 1809 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
f73d3933
VK
1810 || (!policy->governor_enabled
1811 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
95731ebb
XC
1812 mutex_unlock(&cpufreq_governor_lock);
1813 return -EBUSY;
1814 }
1815
1816 if (event == CPUFREQ_GOV_STOP)
1817 policy->governor_enabled = false;
1818 else if (event == CPUFREQ_GOV_START)
1819 policy->governor_enabled = true;
1820
1821 mutex_unlock(&cpufreq_governor_lock);
1822
1da177e4
LT
1823 ret = policy->governor->governor(policy, event);
1824
4d5dcc42
VK
1825 if (!ret) {
1826 if (event == CPUFREQ_GOV_POLICY_INIT)
1827 policy->governor->initialized++;
1828 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1829 policy->governor->initialized--;
95731ebb
XC
1830 } else {
1831 /* Restore original values */
1832 mutex_lock(&cpufreq_governor_lock);
1833 if (event == CPUFREQ_GOV_STOP)
1834 policy->governor_enabled = true;
1835 else if (event == CPUFREQ_GOV_START)
1836 policy->governor_enabled = false;
1837 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1838 }
b394058f 1839
fe492f3f
VK
1840 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1841 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1da177e4
LT
1842 module_put(policy->governor->owner);
1843
1844 return ret;
1845}
1846
1da177e4
LT
1847int cpufreq_register_governor(struct cpufreq_governor *governor)
1848{
3bcb09a3 1849 int err;
1da177e4
LT
1850
1851 if (!governor)
1852 return -EINVAL;
1853
a7b422cd
KRW
1854 if (cpufreq_disabled())
1855 return -ENODEV;
1856
3fc54d37 1857 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1858
b394058f 1859 governor->initialized = 0;
3bcb09a3
JF
1860 err = -EBUSY;
1861 if (__find_governor(governor->name) == NULL) {
1862 err = 0;
1863 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1864 }
1da177e4 1865
32ee8c3e 1866 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1867 return err;
1da177e4
LT
1868}
1869EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1870
1da177e4
LT
/*
 * cpufreq_unregister_governor - remove a governor from the global list.
 * @governor: governor to unregister
 *
 * Also clears any saved "restore this governor on re-plug" entries for
 * offline CPUs so they don't point at a governor that no longer exists.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	/* Forget this governor on CPUs that are currently unplugged. */
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1898
1899
1da177e4
LT
1900/*********************************************************************
1901 * POLICY INTERFACE *
1902 *********************************************************************/
1903
1904/**
1905 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1906 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1907 * is written
1da177e4
LT
1908 *
1909 * Reads the current cpufreq policy.
1910 */
1911int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1912{
1913 struct cpufreq_policy *cpu_policy;
1914 if (!policy)
1915 return -EINVAL;
1916
1917 cpu_policy = cpufreq_cpu_get(cpu);
1918 if (!cpu_policy)
1919 return -EINVAL;
1920
d5b73cd8 1921 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
1922
1923 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1924 return 0;
1925}
1926EXPORT_SYMBOL(cpufreq_get_policy);
1927
153d7f3f 1928/*
037ce839
VK
1929 * policy : current policy.
1930 * new_policy: policy to be set.
153d7f3f 1931 */
037ce839 1932static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 1933 struct cpufreq_policy *new_policy)
1da177e4 1934{
7bd353a9 1935 int ret = 0, failed = 1;
1da177e4 1936
3a3e9e06
VK
1937 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1938 new_policy->min, new_policy->max);
1da177e4 1939
d5b73cd8 1940 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1da177e4 1941
3a3e9e06 1942 if (new_policy->min > policy->max || new_policy->max < policy->min) {
9c9a43ed
MD
1943 ret = -EINVAL;
1944 goto error_out;
1945 }
1946
1da177e4 1947 /* verify the cpu speed can be set within this limit */
3a3e9e06 1948 ret = cpufreq_driver->verify(new_policy);
1da177e4
LT
1949 if (ret)
1950 goto error_out;
1951
1da177e4 1952 /* adjust if necessary - all reasons */
e041c683 1953 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1954 CPUFREQ_ADJUST, new_policy);
1da177e4
LT
1955
1956 /* adjust if necessary - hardware incompatibility*/
e041c683 1957 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1958 CPUFREQ_INCOMPATIBLE, new_policy);
1da177e4 1959
bb176f7d
VK
1960 /*
1961 * verify the cpu speed can be set within this limit, which might be
1962 * different to the first one
1963 */
3a3e9e06 1964 ret = cpufreq_driver->verify(new_policy);
e041c683 1965 if (ret)
1da177e4 1966 goto error_out;
1da177e4
LT
1967
1968 /* notification of the new policy */
e041c683 1969 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
3a3e9e06 1970 CPUFREQ_NOTIFY, new_policy);
1da177e4 1971
3a3e9e06
VK
1972 policy->min = new_policy->min;
1973 policy->max = new_policy->max;
1da177e4 1974
2d06d8c4 1975 pr_debug("new min and max freqs are %u - %u kHz\n",
3a3e9e06 1976 policy->min, policy->max);
1da177e4 1977
1c3d85dd 1978 if (cpufreq_driver->setpolicy) {
3a3e9e06 1979 policy->policy = new_policy->policy;
2d06d8c4 1980 pr_debug("setting range\n");
3a3e9e06 1981 ret = cpufreq_driver->setpolicy(new_policy);
1da177e4 1982 } else {
3a3e9e06 1983 if (new_policy->governor != policy->governor) {
1da177e4 1984 /* save old, working values */
3a3e9e06 1985 struct cpufreq_governor *old_gov = policy->governor;
1da177e4 1986
2d06d8c4 1987 pr_debug("governor switch\n");
1da177e4
LT
1988
1989 /* end old governor */
3a3e9e06
VK
1990 if (policy->governor) {
1991 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
ad7722da 1992 up_write(&policy->rwsem);
3a3e9e06 1993 __cpufreq_governor(policy,
7bd353a9 1994 CPUFREQ_GOV_POLICY_EXIT);
ad7722da 1995 down_write(&policy->rwsem);
7bd353a9 1996 }
1da177e4
LT
1997
1998 /* start new governor */
3a3e9e06
VK
1999 policy->governor = new_policy->governor;
2000 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2001 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
7bd353a9 2002 failed = 0;
955ef483 2003 } else {
ad7722da 2004 up_write(&policy->rwsem);
3a3e9e06 2005 __cpufreq_governor(policy,
7bd353a9 2006 CPUFREQ_GOV_POLICY_EXIT);
ad7722da 2007 down_write(&policy->rwsem);
955ef483 2008 }
7bd353a9
VK
2009 }
2010
2011 if (failed) {
1da177e4 2012 /* new governor failed, so re-start old one */
2d06d8c4 2013 pr_debug("starting governor %s failed\n",
3a3e9e06 2014 policy->governor->name);
1da177e4 2015 if (old_gov) {
3a3e9e06
VK
2016 policy->governor = old_gov;
2017 __cpufreq_governor(policy,
7bd353a9 2018 CPUFREQ_GOV_POLICY_INIT);
3a3e9e06 2019 __cpufreq_governor(policy,
e08f5f5b 2020 CPUFREQ_GOV_START);
1da177e4
LT
2021 }
2022 ret = -EINVAL;
2023 goto error_out;
2024 }
2025 /* might be a policy change, too, so fall through */
2026 }
2d06d8c4 2027 pr_debug("governor: change or update limits\n");
3de9bdeb 2028 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1da177e4
LT
2029 }
2030
7d5e350f 2031error_out:
1da177e4
LT
2032 return ret;
2033}
2034
1da177e4
LT
2035/**
2036 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2037 * @cpu: CPU which shall be re-evaluated
2038 *
25985edc 2039 * Useful for policy notifiers which have different necessities
1da177e4
LT
2040 * at different times.
2041 */
2042int cpufreq_update_policy(unsigned int cpu)
2043{
3a3e9e06
VK
2044 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2045 struct cpufreq_policy new_policy;
f1829e4a 2046 int ret;
1da177e4 2047
3a3e9e06 2048 if (!policy) {
f1829e4a
JL
2049 ret = -ENODEV;
2050 goto no_policy;
2051 }
1da177e4 2052
ad7722da 2053 down_write(&policy->rwsem);
1da177e4 2054
2d06d8c4 2055 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 2056 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
2057 new_policy.min = policy->user_policy.min;
2058 new_policy.max = policy->user_policy.max;
2059 new_policy.policy = policy->user_policy.policy;
2060 new_policy.governor = policy->user_policy.governor;
1da177e4 2061
bb176f7d
VK
2062 /*
2063 * BIOS might change freq behind our back
2064 * -> ask driver for current freq and notify governors about a change
2065 */
1c3d85dd 2066 if (cpufreq_driver->get) {
3a3e9e06
VK
2067 new_policy.cur = cpufreq_driver->get(cpu);
2068 if (!policy->cur) {
2d06d8c4 2069 pr_debug("Driver did not initialize current freq");
3a3e9e06 2070 policy->cur = new_policy.cur;
a85f7bd3 2071 } else {
9c0ebcf7 2072 if (policy->cur != new_policy.cur && has_target())
3a3e9e06
VK
2073 cpufreq_out_of_sync(cpu, policy->cur,
2074 new_policy.cur);
a85f7bd3 2075 }
0961dd0d
TR
2076 }
2077
037ce839 2078 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2079
ad7722da 2080 up_write(&policy->rwsem);
5a01f2e8 2081
3a3e9e06 2082 cpufreq_cpu_put(policy);
f1829e4a 2083no_policy:
1da177e4
LT
2084 return ret;
2085}
2086EXPORT_SYMBOL(cpufreq_update_policy);
2087
2760984f 2088static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2089 unsigned long action, void *hcpu)
2090{
2091 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2092 struct device *dev;
5302c3fb 2093 bool frozen = false;
c32b6b8e 2094
8a25a2fd
KS
2095 dev = get_cpu_device(cpu);
2096 if (dev) {
5302c3fb 2097
d4faadd5
RW
2098 if (action & CPU_TASKS_FROZEN)
2099 frozen = true;
2100
5302c3fb 2101 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2102 case CPU_ONLINE:
5302c3fb 2103 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2104 cpufreq_update_policy(cpu);
c32b6b8e 2105 break;
5302c3fb 2106
c32b6b8e 2107 case CPU_DOWN_PREPARE:
cedb70af 2108 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
1aee40ac
SB
2109 break;
2110
2111 case CPU_POST_DEAD:
cedb70af 2112 __cpufreq_remove_dev_finish(dev, NULL, frozen);
c32b6b8e 2113 break;
5302c3fb 2114
5a01f2e8 2115 case CPU_DOWN_FAILED:
5302c3fb 2116 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2117 break;
2118 }
2119 }
2120 return NOTIFY_OK;
2121}
2122
9c36f746 2123static struct notifier_block __refdata cpufreq_cpu_notifier = {
bb176f7d 2124 .notifier_call = cpufreq_cpu_callback,
c32b6b8e 2125};
1da177e4
LT
2126
2127/*********************************************************************
2128 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2129 *********************************************************************/
2130
2131/**
2132 * cpufreq_register_driver - register a CPU Frequency driver
2133 * @driver_data: A struct cpufreq_driver containing the values#
2134 * submitted by the CPU Frequency driver.
2135 *
bb176f7d 2136 * Registers a CPU Frequency driver to this core code. This code
1da177e4 2137 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 2138 * (and isn't unregistered in the meantime).
1da177e4
LT
2139 *
2140 */
221dee28 2141int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
2142{
2143 unsigned long flags;
2144 int ret;
2145
a7b422cd
KRW
2146 if (cpufreq_disabled())
2147 return -ENODEV;
2148
1da177e4 2149 if (!driver_data || !driver_data->verify || !driver_data->init ||
9c0ebcf7
VK
2150 !(driver_data->setpolicy || driver_data->target_index ||
2151 driver_data->target))
1da177e4
LT
2152 return -EINVAL;
2153
2d06d8c4 2154 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
2155
2156 if (driver_data->setpolicy)
2157 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2158
0d1857a1 2159 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2160 if (cpufreq_driver) {
0d1857a1 2161 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4dea5806 2162 return -EEXIST;
1da177e4 2163 }
1c3d85dd 2164 cpufreq_driver = driver_data;
0d1857a1 2165 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 2166
8a25a2fd 2167 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
2168 if (ret)
2169 goto err_null_driver;
1da177e4 2170
1c3d85dd 2171 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
2172 int i;
2173 ret = -ENODEV;
2174
2175 /* check for at least one working CPU */
7a6aedfa
MT
2176 for (i = 0; i < nr_cpu_ids; i++)
2177 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 2178 ret = 0;
7a6aedfa
MT
2179 break;
2180 }
1da177e4
LT
2181
2182 /* if all ->init() calls failed, unregister */
2183 if (ret) {
2d06d8c4 2184 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 2185 driver_data->name);
8a25a2fd 2186 goto err_if_unreg;
1da177e4
LT
2187 }
2188 }
2189
8f5bc2ab 2190 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 2191 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 2192
8f5bc2ab 2193 return 0;
8a25a2fd
KS
2194err_if_unreg:
2195 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab 2196err_null_driver:
0d1857a1 2197 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2198 cpufreq_driver = NULL;
0d1857a1 2199 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 2200 return ret;
1da177e4
LT
2201}
2202EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2203
1da177e4
LT
2204/**
2205 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2206 *
bb176f7d 2207 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2208 * the right to do so, i.e. if you have succeeded in initialising before!
2209 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2210 * currently not initialised.
2211 */
221dee28 2212int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2213{
2214 unsigned long flags;
2215
1c3d85dd 2216 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2217 return -EINVAL;
1da177e4 2218
2d06d8c4 2219 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2220
8a25a2fd 2221 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2222 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2223
6eed9404 2224 down_write(&cpufreq_rwsem);
0d1857a1 2225 write_lock_irqsave(&cpufreq_driver_lock, flags);
6eed9404 2226
1c3d85dd 2227 cpufreq_driver = NULL;
6eed9404 2228
0d1857a1 2229 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
6eed9404 2230 up_write(&cpufreq_rwsem);
1da177e4
LT
2231
2232 return 0;
2233}
2234EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2235
2236static int __init cpufreq_core_init(void)
2237{
a7b422cd
KRW
2238 if (cpufreq_disabled())
2239 return -ENODEV;
2240
2361be23 2241 cpufreq_global_kobject = kobject_create();
8aa84ad8 2242 BUG_ON(!cpufreq_global_kobject);
e00e56df 2243 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2244
5a01f2e8
VP
2245 return 0;
2246}
5a01f2e8 2247core_initcall(cpufreq_core_init);