cpufreq: Add per policy governor-init/exit infrastructure
[linux-2.6-block.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
7d5e350f 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
0d1857a1 48static DEFINE_RWLOCK(cpufreq_driver_lock);
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
/* Maps each CPU to the policy->cpu whose rwsem protects its policy. */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
/* The per-policy reader-writer semaphore itself (indexed by policy->cpu). */
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

/*
 * Generates lock_policy_rwsem_read()/lock_policy_rwsem_write(): resolve the
 * CPU to its policy-owning CPU, then take that CPU's rwsem in the requested
 * mode.  BUG_ON fires if the CPU has no policy mapping (-1 means removed).
 * Always returns 0; the int return exists so callers can share one pattern.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);
5a01f2e8 82
fa1d8af4
VK
/*
 * Generates unlock_policy_rwsem_read()/unlock_policy_rwsem_write(): the
 * release counterparts of lock_policy_rwsem_##mode() above.  The same
 * cpu -> policy_cpu indirection must be used so the semaphore released is
 * the one that was taken.
 */
#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 93
1da177e4 94/* internal prototypes */
29464f28
DJ
95static int __cpufreq_governor(struct cpufreq_policy *policy,
96 unsigned int event);
5a01f2e8 97static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 98static void handle_update(struct work_struct *work);
1da177e4
LT
99
100/**
32ee8c3e
DJ
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
1da177e4
LT
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
106 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

/* Guards against registering transition notifiers before the SRCU head
 * below has been initialized (checked by the register path). */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: runs earliest, before any driver can register notifiers. */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
/* Non-zero once disable_cpufreq() has been called; never cleared. */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
/* Permanently disable the cpufreq subsystem (e.g. from arch setup code). */
void disable_cpufreq(void)
{
	off = 1;
}
/* Registered governors, protected by cpufreq_governor_mutex. */
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
/*
 * __cpufreq_cpu_get - look up a CPU's policy and pin it.
 *
 * Under cpufreq_driver_lock (read side), takes a module reference on the
 * cpufreq driver and, unless @sysfs is true, a kobject reference on the
 * policy.  Sysfs callers skip the kobject get because sysfs itself keeps
 * the kobject alive while an attribute file is open.
 *
 * Returns the pinned policy, or NULL on any failure (references are
 * unwound via the goto chain below).
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
168
169struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
170{
d5aaffa9
DB
171 if (cpufreq_disabled())
172 return NULL;
173
a9144436
SB
174 return __cpufreq_cpu_get(cpu, false);
175}
1da177e4
LT
176EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
177
a9144436
SB
/* Sysfs variant: pins driver module only, no kobject reference (sysfs
 * already holds the kobject while an attribute is being accessed). */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
182
/* Drop the references taken by __cpufreq_cpu_get(): kobject ref only for
 * non-sysfs callers, driver module ref always. */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
7d5e350f 189
1da177e4
LT
190void cpufreq_cpu_put(struct cpufreq_policy *data)
191{
d5aaffa9
DB
192 if (cpufreq_disabled())
193 return;
194
a9144436 195 __cpufreq_cpu_put(data, false);
1da177e4
LT
196}
197EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
198
a9144436
SB
/* Release counterpart of cpufreq_cpu_get_sysfs(): drops only the driver
 * module reference. */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 203
1da177e4
LT
204/*********************************************************************
205 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
206 *********************************************************************/
207
208/**
209 * adjust_jiffies - adjust the system "loops_per_jiffy"
210 *
211 * This function alters the system "loops_per_jiffy" for the clock
212 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 213 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
214 * per-CPU loops_per_jiffy value wherever possible.
215 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was sampled at, captured
 * the first time adjust_jiffies() runs. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Driver declares delay loops frequency-independent: nothing to do. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* Lazily capture the reference point on first use. */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale after an actual change (POSTCHANGE with old != new) or on
	 * suspend/resume transitions. */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled by the arch; global value is
 * left untouched (CPUs may scale independently). */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
245
246
247/**
e4472cb3
DJ
248 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
249 * on frequency transition.
1da177e4 250 *
e4472cb3
DJ
251 * This function calls the transition notifiers and the "adjust_jiffies"
252 * function. It is called twice on all CPU frequency changes that have
32ee8c3e 253 * external effects.
1da177e4
LT
254 */
/*
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies on
 * frequency transition.
 *
 * Called twice per transition: once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE.  Runs the SRCU transition notifier chain, keeps
 * loops_per_jiffy in sync via adjust_jiffies(), and updates policy->cur
 * after the change.  Must not be called with interrupts disabled
 * (BUG_ON below) because the notifier chain may sleep.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	/* Snapshot the policy pointer under the driver lock; used unlocked
	 * below, relying on the transition being serialized by the caller. */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* Record the new frequency as the policy's current one. */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
307
308
309
310/*********************************************************************
311 * SYSFS INTERFACE *
312 *********************************************************************/
313
3bcb09a3
JF
314static struct cpufreq_governor *__find_governor(const char *str_governor)
315{
316 struct cpufreq_governor *t;
317
318 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 319 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
320 return t;
321
322 return NULL;
323}
324
1da177e4
LT
325/**
326 * cpufreq_parse_governor - parse a governor string
327 */
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For ->setpolicy drivers, maps "performance"/"powersave" to *@policy.
 * For ->target drivers, resolves @str_governor to a registered governor
 * in *@governor, loading the "cpufreq_<name>" module on demand.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* Drop the mutex across request_module(): module init
			 * will re-take it to register the governor. */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			/* Re-check: the freshly loaded module (or a racing
			 * loader) may have registered it meanwhile. */
			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
373
374
1da177e4 375/**
e08f5f5b
GS
376 * cpufreq_per_cpu_attr_read() / show_##file_name() -
377 * print out cpufreq information
1da177e4
LT
378 *
379 * Write out information from cpufreq_driver->policy[cpu]; object must be
380 * "unsigned int".
381 */
382
32ee8c3e
DJ
/*
 * show_one - generate a sysfs show() handler that prints one unsigned int
 * member of struct cpufreq_policy (the @object member access path).
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
396
e08f5f5b
GS
397static int __cpufreq_set_policy(struct cpufreq_policy *data,
398 struct cpufreq_policy *policy);
7970e08b 399
1da177e4
LT
400/**
401 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
402 */
403#define store_one(file_name, object) \
404static ssize_t store_##file_name \
905d77cd 405(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 406{ \
f55c9c26 407 unsigned int ret; \
1da177e4
LT
408 struct cpufreq_policy new_policy; \
409 \
410 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
411 if (ret) \
412 return -EINVAL; \
413 \
29464f28 414 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
415 if (ret != 1) \
416 return -EINVAL; \
417 \
7970e08b
TR
418 ret = __cpufreq_set_policy(policy, &new_policy); \
419 policy->user_policy.object = policy->object; \
1da177e4
LT
420 \
421 return ret ? ret : count; \
422}
423
29464f28
DJ
424store_one(scaling_min_freq, min);
425store_one(scaling_max_freq, max);
1da177e4
LT
426
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 *
 * Queries the hardware via __cpufreq_get(); prints "<unknown>" when the
 * driver cannot report a frequency (returns 0).
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
438
439
440/**
441 * show_scaling_governor - show the current policy for the specified CPU
442 */
905d77cd 443static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 444{
29464f28 445 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
446 return sprintf(buf, "powersave\n");
447 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
448 return sprintf(buf, "performance\n");
449 else if (policy->governor)
4b972f0b 450 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 451 policy->governor->name);
1da177e4
LT
452 return -EINVAL;
453}
454
455
456/**
457 * store_scaling_governor - store policy for the specified CPU
458 */
905d77cd
DJ
459static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
460 const char *buf, size_t count)
1da177e4 461{
f55c9c26 462 unsigned int ret;
1da177e4
LT
463 char str_governor[16];
464 struct cpufreq_policy new_policy;
465
466 ret = cpufreq_get_policy(&new_policy, policy->cpu);
467 if (ret)
468 return ret;
469
29464f28 470 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
471 if (ret != 1)
472 return -EINVAL;
473
e08f5f5b
GS
474 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
475 &new_policy.governor))
1da177e4
LT
476 return -EINVAL;
477
7970e08b
TR
478 /* Do not use cpufreq_set_policy here or the user_policy.max
479 will be wrongly overridden */
7970e08b
TR
480 ret = __cpufreq_set_policy(policy, &new_policy);
481
482 policy->user_policy.policy = policy->policy;
483 policy->user_policy.governor = policy->governor;
7970e08b 484
e08f5f5b
GS
485 if (ret)
486 return ret;
487 else
488 return count;
1da177e4
LT
489}
490
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
498
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * For setpolicy-only drivers (no ->target) the fixed "performance
 * powersave" pair is printed; otherwise the registered governor list.
 * NOTE(review): the list is walked without cpufreq_governor_mutex here —
 * presumably relying on other serialization; confirm before relying on it.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* Stop while there is still room for one more name, a space
		 * and the trailing newline within the sysfs PAGE_SIZE buf. */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 523
/* Print the CPU numbers in @mask as a space-separated list, bounded by the
 * sysfs PAGE_SIZE buffer (stops early when fewer than 5 bytes remain). */
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
539
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
556
/* Forward a user-requested frequency to the governor's store_setspeed
 * hook (only meaningful for governors like "userspace" that provide it). */
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	/* Hook's own status is not propagated; success is assumed here. */
	policy->governor->store_setspeed(policy, freq);

	return count;
}
574
/* Show the governor's set speed via its show_setspeed hook, or
 * "<unsupported>" for governors that do not implement it. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 582
e2f74f35 583/**
8bf1ac72 584 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
585 */
586static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
587{
588 unsigned int limit;
589 int ret;
590 if (cpufreq_driver->bios_limit) {
591 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
592 if (!ret)
593 return sprintf(buf, "%u\n", limit);
594 }
595 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
596}
597
6dad2a29
BP
/* Per-policy sysfs attribute objects (generated by the cpufreq_freq_attr_*
 * macros; cpuinfo_cur_freq is root-readable only). */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

/* Attributes created for every policy kobject.  Note: cpuinfo_cur_freq,
 * scaling_cur_freq and bios_limit are added conditionally in
 * cpufreq_add_dev_interface() depending on driver capabilities. */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

/* /sys/devices/system/cpu/cpufreq kobject, shared by drivers/governors. */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Convert embedded kobject / generic attribute back to cpufreq types. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 633
/* Generic sysfs show dispatcher: pin the policy, take its rwsem for
 * reading, and delegate to the attribute's ->show hook. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* Re-pin by cpu number; sysfs variant skips the kobject get. */
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
657
905d77cd
DJ
/* Generic sysfs store dispatcher: mirror of show(), but takes the policy
 * rwsem in write mode before calling the attribute's ->store hook. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
682
/* kobject release callback: signals whoever is waiting in
 * wait_for_completion(&policy->kobj_unregister) that the last sysfs
 * reference to the policy is gone and it is safe to free. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
689
/* sysfs ops routing every attribute access through show()/store() above. */
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

/* kobject type for per-policy directories under .../cpuN/cpufreq. */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
700
/* symlink affected CPUs */
/*
 * For every other CPU in policy->cpus, create a "cpufreq" symlink in that
 * CPU's sysfs directory pointing at the shared policy kobject.  Each link
 * holds a policy reference taken via cpufreq_cpu_get(); the reference is
 * intentionally kept on success and only dropped on failure.
 * NOTE(review): get_cpu_device(j) is used without a NULL check — presumably
 * guaranteed non-NULL for online CPUs; confirm.
 */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		/* The owning CPU already has the real directory, not a link. */
		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
727
cf3289d0
AC
/*
 * Set up the sysfs interface for a new policy and activate it:
 * create the policy kobject under @dev, add driver/core attribute files,
 * publish the policy in the per-cpu tables, create sibling symlinks, and
 * apply the starting policy via __cpufreq_set_policy().
 * On attribute failure the kobject is torn down and its unregister
 * completion awaited before returning.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* Optional core attributes, keyed on driver capabilities. */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* Publish the policy for all member CPUs under the driver lock. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	/* Wait until cpufreq_sysfs_release() has run before caller frees. */
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
800
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach a hot-added CPU to an existing sibling's policy instead of
 * creating a new one: stop the governor, add the CPU to policy->cpus
 * under the policy rwsem + driver lock, restart the governor, and create
 * the sysfs symlink.  The cpufreq_cpu_get() reference is kept on success
 * (it backs the symlink) and dropped only if link creation fails.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	__cpufreq_governor(policy, CPUFREQ_GOV_START);
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
837
838/**
839 * cpufreq_add_dev - add a CPU device
840 *
32ee8c3e 841 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
842 *
843 * The Oracle says: try running cpufreq registration/unregistration concurrently
844 * with with cpu hotplugging and all hell will break loose. Tried to clean this
845 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 846 */
8a25a2fd 847static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 848{
fcf80582 849 unsigned int j, cpu = dev->id;
65922465 850 int ret = -ENOMEM;
1da177e4 851 struct cpufreq_policy *policy;
1da177e4 852 unsigned long flags;
90e41bac 853#ifdef CONFIG_HOTPLUG_CPU
fcf80582 854 struct cpufreq_governor *gov;
90e41bac
PB
855 int sibling;
856#endif
1da177e4 857
c32b6b8e
AR
858 if (cpu_is_offline(cpu))
859 return 0;
860
2d06d8c4 861 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
862
863#ifdef CONFIG_SMP
864 /* check whether a different CPU already registered this
865 * CPU because it is in the same boat. */
866 policy = cpufreq_cpu_get(cpu);
867 if (unlikely(policy)) {
8ff69732 868 cpufreq_cpu_put(policy);
1da177e4
LT
869 return 0;
870 }
fcf80582
VK
871
872#ifdef CONFIG_HOTPLUG_CPU
873 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 874 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
875 for_each_online_cpu(sibling) {
876 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 877 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 878 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 879 return cpufreq_add_policy_cpu(cpu, sibling, dev);
2eaa3e2d 880 }
fcf80582 881 }
0d1857a1 882 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 883#endif
1da177e4
LT
884#endif
885
886 if (!try_module_get(cpufreq_driver->owner)) {
887 ret = -EINVAL;
888 goto module_out;
889 }
890
e98df50c 891 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 892 if (!policy)
1da177e4 893 goto nomem_out;
059019a3
DJ
894
895 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 896 goto err_free_policy;
059019a3
DJ
897
898 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 899 goto err_free_cpumask;
1da177e4
LT
900
901 policy->cpu = cpu;
65922465 902 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 903 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 904
5a01f2e8 905 /* Initially set CPU itself as the policy_cpu */
f1625066 906 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 907
1da177e4 908 init_completion(&policy->kobj_unregister);
65f27f38 909 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
910
911 /* call driver. From then on the cpufreq must be able
912 * to accept all calls to ->verify and ->setpolicy for this CPU
913 */
914 ret = cpufreq_driver->init(policy);
915 if (ret) {
2d06d8c4 916 pr_debug("initialization failed\n");
2eaa3e2d 917 goto err_set_policy_cpu;
1da177e4 918 }
643ae6e8 919
fcf80582
VK
920 /* related cpus should atleast have policy->cpus */
921 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
922
643ae6e8
VK
923 /*
924 * affected cpus must always be the one, which are online. We aren't
925 * managing offline cpus here.
926 */
927 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
928
187d9f4e
MC
929 policy->user_policy.min = policy->min;
930 policy->user_policy.max = policy->max;
1da177e4 931
a1531acd
TR
932 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
933 CPUFREQ_START, policy);
934
fcf80582
VK
935#ifdef CONFIG_HOTPLUG_CPU
936 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
937 if (gov) {
938 policy->governor = gov;
939 pr_debug("Restoring governor %s for cpu %d\n",
940 policy->governor->name, cpu);
4bfa042c 941 }
fcf80582 942#endif
1da177e4 943
8a25a2fd 944 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
945 if (ret)
946 goto err_out_unregister;
8ff69732 947
038c5b3e 948 kobject_uevent(&policy->kobj, KOBJ_ADD);
1da177e4 949 module_put(cpufreq_driver->owner);
2d06d8c4 950 pr_debug("initialization complete\n");
87c32271 951
1da177e4
LT
952 return 0;
953
1da177e4 954err_out_unregister:
0d1857a1 955 write_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 956 for_each_cpu(j, policy->cpus)
7a6aedfa 957 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 958 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 959
c10997f6 960 kobject_put(&policy->kobj);
1da177e4
LT
961 wait_for_completion(&policy->kobj_unregister);
962
2eaa3e2d
VK
963err_set_policy_cpu:
964 per_cpu(cpufreq_policy_cpu, cpu) = -1;
cad70a6a 965 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
966err_free_cpumask:
967 free_cpumask_var(policy->cpus);
968err_free_policy:
1da177e4 969 kfree(policy);
1da177e4
LT
970nomem_out:
971 module_put(cpufreq_driver->owner);
c32b6b8e 972module_out:
1da177e4
LT
973 return ret;
974}
975
b8eed8af
VK
976static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
977{
978 int j;
979
980 policy->last_cpu = policy->cpu;
981 policy->cpu = cpu;
982
3361b7b1 983 for_each_cpu(j, policy->cpus)
b8eed8af 984 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
985
986#ifdef CONFIG_CPU_FREQ_TABLE
987 cpufreq_frequency_table_update_policy_cpu(policy);
988#endif
989 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
990 CPUFREQ_UPDATE_POLICY_CPU, policy);
991}
1da177e4
LT
992
993/**
5a01f2e8 994 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
995 *
996 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
997 * Caller should already have policy_rwsem in write mode for this CPU.
998 * This routine frees the rwsem before returning.
1da177e4 999 */
8a25a2fd 1000static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 1001{
b8eed8af 1002 unsigned int cpu = dev->id, ret, cpus;
1da177e4
LT
1003 unsigned long flags;
1004 struct cpufreq_policy *data;
499bca9b
AW
1005 struct kobject *kobj;
1006 struct completion *cmp;
8a25a2fd 1007 struct device *cpu_dev;
1da177e4 1008
b8eed8af 1009 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1010
0d1857a1 1011 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1012
7a6aedfa 1013 data = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d
VK
1014 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1015
0d1857a1 1016 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1017
1018 if (!data) {
b8eed8af 1019 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1020 return -EINVAL;
1021 }
1da177e4 1022
b8eed8af 1023 if (cpufreq_driver->target)
f6a7409c 1024 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1da177e4 1025
084f3493 1026#ifdef CONFIG_HOTPLUG_CPU
fa69e33f
DB
1027 if (!cpufreq_driver->setpolicy)
1028 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1029 data->governor->name, CPUFREQ_NAME_LEN);
1da177e4
LT
1030#endif
1031
2eaa3e2d 1032 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af
VK
1033 cpus = cpumask_weight(data->cpus);
1034 cpumask_clear_cpu(cpu, data->cpus);
2eaa3e2d 1035 unlock_policy_rwsem_write(cpu);
084f3493 1036
73bf0fc2
VK
1037 if (cpu != data->cpu) {
1038 sysfs_remove_link(&dev->kobj, "cpufreq");
1039 } else if (cpus > 1) {
b8eed8af
VK
1040 /* first sibling now owns the new sysfs dir */
1041 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1042 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1043 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1044 if (ret) {
1045 pr_err("%s: Failed to move kobj: %d", __func__, ret);
084f3493 1046
2eaa3e2d 1047 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1048 cpumask_set_cpu(cpu, data->cpus);
1da177e4 1049
0d1857a1 1050 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1051 per_cpu(cpufreq_cpu_data, cpu) = data;
0d1857a1 1052 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1053
499bca9b 1054 unlock_policy_rwsem_write(cpu);
1da177e4 1055
2eaa3e2d
VK
1056 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1057 "cpufreq");
b8eed8af 1058 return -EINVAL;
1da177e4 1059 }
5a01f2e8 1060
2eaa3e2d 1061 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1062 update_policy_cpu(data, cpu_dev->id);
2eaa3e2d 1063 unlock_policy_rwsem_write(cpu);
b8eed8af
VK
1064 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1065 __func__, cpu_dev->id, cpu);
1da177e4 1066 }
1da177e4 1067
b8eed8af
VK
1068 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1069 cpufreq_cpu_put(data);
1da177e4 1070
b8eed8af
VK
1071 /* If cpu is last user of policy, free policy */
1072 if (cpus == 1) {
7bd353a9
VK
1073 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1074
2eaa3e2d 1075 lock_policy_rwsem_read(cpu);
b8eed8af
VK
1076 kobj = &data->kobj;
1077 cmp = &data->kobj_unregister;
2eaa3e2d 1078 unlock_policy_rwsem_read(cpu);
b8eed8af 1079 kobject_put(kobj);
7d26e2d5 1080
b8eed8af
VK
1081 /* we need to make sure that the underlying kobj is actually
1082 * not referenced anymore by anybody before we proceed with
1083 * unloading.
1084 */
1085 pr_debug("waiting for dropping of refcount\n");
1086 wait_for_completion(cmp);
1087 pr_debug("wait complete\n");
7d26e2d5 1088
b8eed8af
VK
1089 if (cpufreq_driver->exit)
1090 cpufreq_driver->exit(data);
27ecddc2 1091
b8eed8af
VK
1092 free_cpumask_var(data->related_cpus);
1093 free_cpumask_var(data->cpus);
1094 kfree(data);
1095 } else if (cpufreq_driver->target) {
1096 __cpufreq_governor(data, CPUFREQ_GOV_START);
1097 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
27ecddc2 1098 }
1da177e4 1099
2eaa3e2d 1100 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1da177e4
LT
1101 return 0;
1102}
1103
1104
8a25a2fd 1105static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1106{
8a25a2fd 1107 unsigned int cpu = dev->id;
5a01f2e8 1108 int retval;
ec28297a
VP
1109
1110 if (cpu_is_offline(cpu))
1111 return 0;
1112
8a25a2fd 1113 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1114 return retval;
1115}
1116
1117
65f27f38 1118static void handle_update(struct work_struct *work)
1da177e4 1119{
65f27f38
DH
1120 struct cpufreq_policy *policy =
1121 container_of(work, struct cpufreq_policy, update);
1122 unsigned int cpu = policy->cpu;
2d06d8c4 1123 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1124 cpufreq_update_policy(cpu);
1125}
1126
1127/**
1128 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1129 * @cpu: cpu number
1130 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1131 * @new_freq: CPU frequency the CPU actually runs at
1132 *
29464f28
DJ
1133 * We adjust to current frequency first, and need to clean up later.
1134 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1135 */
e08f5f5b
GS
1136static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1137 unsigned int new_freq)
1da177e4
LT
1138{
1139 struct cpufreq_freqs freqs;
1140
2d06d8c4 1141 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1142 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1143
1144 freqs.cpu = cpu;
1145 freqs.old = old_freq;
1146 freqs.new = new_freq;
1147 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1148 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1149}
1150
1151
32ee8c3e 1152/**
4ab70df4 1153 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1154 * @cpu: CPU number
1155 *
1156 * This is the last known freq, without actually getting it from the driver.
1157 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1158 */
1159unsigned int cpufreq_quick_get(unsigned int cpu)
1160{
9e21ba8b 1161 struct cpufreq_policy *policy;
e08f5f5b 1162 unsigned int ret_freq = 0;
95235ca2 1163
9e21ba8b
DB
1164 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1165 return cpufreq_driver->get(cpu);
1166
1167 policy = cpufreq_cpu_get(cpu);
95235ca2 1168 if (policy) {
e08f5f5b 1169 ret_freq = policy->cur;
95235ca2
VP
1170 cpufreq_cpu_put(policy);
1171 }
1172
4d34a67d 1173 return ret_freq;
95235ca2
VP
1174}
1175EXPORT_SYMBOL(cpufreq_quick_get);
1176
3d737108
JB
1177/**
1178 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1179 * @cpu: CPU number
1180 *
1181 * Just return the max possible frequency for a given CPU.
1182 */
1183unsigned int cpufreq_quick_get_max(unsigned int cpu)
1184{
1185 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1186 unsigned int ret_freq = 0;
1187
1188 if (policy) {
1189 ret_freq = policy->max;
1190 cpufreq_cpu_put(policy);
1191 }
1192
1193 return ret_freq;
1194}
1195EXPORT_SYMBOL(cpufreq_quick_get_max);
1196
95235ca2 1197
5a01f2e8 1198static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1199{
7a6aedfa 1200 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1201 unsigned int ret_freq = 0;
1da177e4 1202
1da177e4 1203 if (!cpufreq_driver->get)
4d34a67d 1204 return ret_freq;
1da177e4 1205
e08f5f5b 1206 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1207
e08f5f5b
GS
1208 if (ret_freq && policy->cur &&
1209 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1210 /* verify no discrepancy between actual and
1211 saved value exists */
1212 if (unlikely(ret_freq != policy->cur)) {
1213 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1214 schedule_work(&policy->update);
1215 }
1216 }
1217
4d34a67d 1218 return ret_freq;
5a01f2e8 1219}
1da177e4 1220
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1248
8a25a2fd
KS
1249static struct subsys_interface cpufreq_interface = {
1250 .name = "cpufreq",
1251 .subsys = &cpu_subsys,
1252 .add_dev = cpufreq_add_dev,
1253 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1254};
1255
1da177e4 1256
42d4dc3f 1257/**
e00e56df
RW
1258 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1259 *
1260 * This function is only executed for the boot processor. The other CPUs
1261 * have been put offline by means of CPU hotplug.
42d4dc3f 1262 */
e00e56df 1263static int cpufreq_bp_suspend(void)
42d4dc3f 1264{
e08f5f5b 1265 int ret = 0;
4bc5d341 1266
e00e56df 1267 int cpu = smp_processor_id();
42d4dc3f
BH
1268 struct cpufreq_policy *cpu_policy;
1269
2d06d8c4 1270 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1271
e00e56df 1272 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1273 cpu_policy = cpufreq_cpu_get(cpu);
1274 if (!cpu_policy)
e00e56df 1275 return 0;
42d4dc3f
BH
1276
1277 if (cpufreq_driver->suspend) {
7ca64e2d 1278 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1279 if (ret)
42d4dc3f
BH
1280 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1281 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1282 }
1283
42d4dc3f 1284 cpufreq_cpu_put(cpu_policy);
c9060494 1285 return ret;
42d4dc3f
BH
1286}
1287
1da177e4 1288/**
e00e56df 1289 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1290 *
1291 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1292 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1293 * restored. It will verify that the current freq is in sync with
1294 * what we believe it to be. This is a bit later than when it
1295 * should be, but nonethteless it's better than calling
1296 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1297 *
1298 * This function is only executed for the boot CPU. The other CPUs have not
1299 * been turned on yet.
1da177e4 1300 */
e00e56df 1301static void cpufreq_bp_resume(void)
1da177e4 1302{
e08f5f5b 1303 int ret = 0;
4bc5d341 1304
e00e56df 1305 int cpu = smp_processor_id();
1da177e4
LT
1306 struct cpufreq_policy *cpu_policy;
1307
2d06d8c4 1308 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1309
e00e56df 1310 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1311 cpu_policy = cpufreq_cpu_get(cpu);
1312 if (!cpu_policy)
e00e56df 1313 return;
1da177e4
LT
1314
1315 if (cpufreq_driver->resume) {
1316 ret = cpufreq_driver->resume(cpu_policy);
1317 if (ret) {
1318 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1319 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1320 goto fail;
1da177e4
LT
1321 }
1322 }
1323
1da177e4 1324 schedule_work(&cpu_policy->update);
ce6c3997 1325
c9060494 1326fail:
1da177e4 1327 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1328}
1329
e00e56df
RW
1330static struct syscore_ops cpufreq_syscore_ops = {
1331 .suspend = cpufreq_bp_suspend,
1332 .resume = cpufreq_bp_resume,
1da177e4
LT
1333};
1334
9d95046e
BP
1335/**
1336 * cpufreq_get_current_driver - return current driver's name
1337 *
1338 * Return the name string of the currently loaded cpufreq driver
1339 * or NULL, if none.
1340 */
1341const char *cpufreq_get_current_driver(void)
1342{
1343 if (cpufreq_driver)
1344 return cpufreq_driver->name;
1345
1346 return NULL;
1347}
1348EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1349
1350/*********************************************************************
1351 * NOTIFIER LISTS INTERFACE *
1352 *********************************************************************/
1353
1354/**
1355 * cpufreq_register_notifier - register a driver with cpufreq
1356 * @nb: notifier function to register
1357 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1358 *
32ee8c3e 1359 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1360 * are notified about clock rate changes (once before and once after
1361 * the transition), or a list of drivers that are notified about
1362 * changes in cpufreq policy.
1363 *
1364 * This function may sleep, and has the same return conditions as
e041c683 1365 * blocking_notifier_chain_register.
1da177e4
LT
1366 */
1367int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1368{
1369 int ret;
1370
d5aaffa9
DB
1371 if (cpufreq_disabled())
1372 return -EINVAL;
1373
74212ca4
CEB
1374 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1375
1da177e4
LT
1376 switch (list) {
1377 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1378 ret = srcu_notifier_chain_register(
e041c683 1379 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1380 break;
1381 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1382 ret = blocking_notifier_chain_register(
1383 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1384 break;
1385 default:
1386 ret = -EINVAL;
1387 }
1da177e4
LT
1388
1389 return ret;
1390}
1391EXPORT_SYMBOL(cpufreq_register_notifier);
1392
1393
1394/**
1395 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1396 * @nb: notifier block to be unregistered
1397 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1398 *
1399 * Remove a driver from the CPU frequency notifier list.
1400 *
1401 * This function may sleep, and has the same return conditions as
e041c683 1402 * blocking_notifier_chain_unregister.
1da177e4
LT
1403 */
1404int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1405{
1406 int ret;
1407
d5aaffa9
DB
1408 if (cpufreq_disabled())
1409 return -EINVAL;
1410
1da177e4
LT
1411 switch (list) {
1412 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1413 ret = srcu_notifier_chain_unregister(
e041c683 1414 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1415 break;
1416 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1417 ret = blocking_notifier_chain_unregister(
1418 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1419 break;
1420 default:
1421 ret = -EINVAL;
1422 }
1da177e4
LT
1423
1424 return ret;
1425}
1426EXPORT_SYMBOL(cpufreq_unregister_notifier);
1427
1428
1429/*********************************************************************
1430 * GOVERNORS *
1431 *********************************************************************/
1432
1433
1434int __cpufreq_driver_target(struct cpufreq_policy *policy,
1435 unsigned int target_freq,
1436 unsigned int relation)
1437{
1438 int retval = -EINVAL;
7249924e 1439 unsigned int old_target_freq = target_freq;
c32b6b8e 1440
a7b422cd
KRW
1441 if (cpufreq_disabled())
1442 return -ENODEV;
1443
7249924e
VK
1444 /* Make sure that target_freq is within supported range */
1445 if (target_freq > policy->max)
1446 target_freq = policy->max;
1447 if (target_freq < policy->min)
1448 target_freq = policy->min;
1449
1450 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1451 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1452
1453 if (target_freq == policy->cur)
1454 return 0;
1455
3361b7b1 1456 if (cpufreq_driver->target)
1da177e4 1457 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1458
1da177e4
LT
1459 return retval;
1460}
1461EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1462
1da177e4
LT
1463int cpufreq_driver_target(struct cpufreq_policy *policy,
1464 unsigned int target_freq,
1465 unsigned int relation)
1466{
f1829e4a 1467 int ret = -EINVAL;
1da177e4
LT
1468
1469 policy = cpufreq_cpu_get(policy->cpu);
1470 if (!policy)
f1829e4a 1471 goto no_policy;
1da177e4 1472
5a01f2e8 1473 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1474 goto fail;
1da177e4
LT
1475
1476 ret = __cpufreq_driver_target(policy, target_freq, relation);
1477
5a01f2e8 1478 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1479
f1829e4a 1480fail:
1da177e4 1481 cpufreq_cpu_put(policy);
f1829e4a 1482no_policy:
1da177e4
LT
1483 return ret;
1484}
1485EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1486
bf0b90e3 1487int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1488{
1489 int ret = 0;
1490
d5aaffa9
DB
1491 if (cpufreq_disabled())
1492 return ret;
1493
3361b7b1 1494 if (!cpufreq_driver->getavg)
0676f7f2
VK
1495 return 0;
1496
dfde5d62
VP
1497 policy = cpufreq_cpu_get(policy->cpu);
1498 if (!policy)
1499 return -EINVAL;
1500
0676f7f2 1501 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1502
dfde5d62
VP
1503 cpufreq_cpu_put(policy);
1504 return ret;
1505}
5a01f2e8 1506EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1507
153d7f3f 1508/*
153d7f3f
AV
1509 * when "event" is CPUFREQ_GOV_LIMITS
1510 */
1da177e4 1511
e08f5f5b
GS
1512static int __cpufreq_governor(struct cpufreq_policy *policy,
1513 unsigned int event)
1da177e4 1514{
cc993cab 1515 int ret;
6afde10c
TR
1516
1517 /* Only must be defined when default governor is known to have latency
1518 restrictions, like e.g. conservative or ondemand.
1519 That this is the case is already ensured in Kconfig
1520 */
1521#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1522 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1523#else
1524 struct cpufreq_governor *gov = NULL;
1525#endif
1c256245
TR
1526
1527 if (policy->governor->max_transition_latency &&
1528 policy->cpuinfo.transition_latency >
1529 policy->governor->max_transition_latency) {
6afde10c
TR
1530 if (!gov)
1531 return -EINVAL;
1532 else {
1533 printk(KERN_WARNING "%s governor failed, too long"
1534 " transition latency of HW, fallback"
1535 " to %s governor\n",
1536 policy->governor->name,
1537 gov->name);
1538 policy->governor = gov;
1539 }
1c256245 1540 }
1da177e4
LT
1541
1542 if (!try_module_get(policy->governor->owner))
1543 return -EINVAL;
1544
2d06d8c4 1545 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1546 policy->cpu, event);
1da177e4
LT
1547 ret = policy->governor->governor(policy, event);
1548
8e53695f
VK
1549 if (event == CPUFREQ_GOV_START)
1550 policy->governor->initialized++;
1551 else if (event == CPUFREQ_GOV_STOP)
1552 policy->governor->initialized--;
b394058f 1553
e08f5f5b
GS
1554 /* we keep one module reference alive for
1555 each CPU governed by this CPU */
1da177e4
LT
1556 if ((event != CPUFREQ_GOV_START) || ret)
1557 module_put(policy->governor->owner);
1558 if ((event == CPUFREQ_GOV_STOP) && !ret)
1559 module_put(policy->governor->owner);
1560
1561 return ret;
1562}
1563
1564
1da177e4
LT
1565int cpufreq_register_governor(struct cpufreq_governor *governor)
1566{
3bcb09a3 1567 int err;
1da177e4
LT
1568
1569 if (!governor)
1570 return -EINVAL;
1571
a7b422cd
KRW
1572 if (cpufreq_disabled())
1573 return -ENODEV;
1574
3fc54d37 1575 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1576
b394058f 1577 governor->initialized = 0;
3bcb09a3
JF
1578 err = -EBUSY;
1579 if (__find_governor(governor->name) == NULL) {
1580 err = 0;
1581 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1582 }
1da177e4 1583
32ee8c3e 1584 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1585 return err;
1da177e4
LT
1586}
1587EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1588
1589
1590void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1591{
90e41bac
PB
1592#ifdef CONFIG_HOTPLUG_CPU
1593 int cpu;
1594#endif
1595
1da177e4
LT
1596 if (!governor)
1597 return;
1598
a7b422cd
KRW
1599 if (cpufreq_disabled())
1600 return;
1601
90e41bac
PB
1602#ifdef CONFIG_HOTPLUG_CPU
1603 for_each_present_cpu(cpu) {
1604 if (cpu_online(cpu))
1605 continue;
1606 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1607 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1608 }
1609#endif
1610
3fc54d37 1611 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1612 list_del(&governor->governor_list);
3fc54d37 1613 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1614 return;
1615}
1616EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1617
1618
1619
1620/*********************************************************************
1621 * POLICY INTERFACE *
1622 *********************************************************************/
1623
1624/**
1625 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1626 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1627 * is written
1da177e4
LT
1628 *
1629 * Reads the current cpufreq policy.
1630 */
1631int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1632{
1633 struct cpufreq_policy *cpu_policy;
1634 if (!policy)
1635 return -EINVAL;
1636
1637 cpu_policy = cpufreq_cpu_get(cpu);
1638 if (!cpu_policy)
1639 return -EINVAL;
1640
1da177e4 1641 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1642
1643 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1644 return 0;
1645}
1646EXPORT_SYMBOL(cpufreq_get_policy);
1647
1648
153d7f3f 1649/*
e08f5f5b
GS
1650 * data : current policy.
1651 * policy : policy to be set.
153d7f3f 1652 */
e08f5f5b
GS
1653static int __cpufreq_set_policy(struct cpufreq_policy *data,
1654 struct cpufreq_policy *policy)
1da177e4 1655{
7bd353a9 1656 int ret = 0, failed = 1;
1da177e4 1657
2d06d8c4 1658 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1da177e4
LT
1659 policy->min, policy->max);
1660
e08f5f5b
GS
1661 memcpy(&policy->cpuinfo, &data->cpuinfo,
1662 sizeof(struct cpufreq_cpuinfo));
1da177e4 1663
53391fa2 1664 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1665 ret = -EINVAL;
1666 goto error_out;
1667 }
1668
1da177e4
LT
1669 /* verify the cpu speed can be set within this limit */
1670 ret = cpufreq_driver->verify(policy);
1671 if (ret)
1672 goto error_out;
1673
1da177e4 1674 /* adjust if necessary - all reasons */
e041c683
AS
1675 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1676 CPUFREQ_ADJUST, policy);
1da177e4
LT
1677
1678 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1679 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1680 CPUFREQ_INCOMPATIBLE, policy);
1da177e4
LT
1681
1682 /* verify the cpu speed can be set within this limit,
1683 which might be different to the first one */
1684 ret = cpufreq_driver->verify(policy);
e041c683 1685 if (ret)
1da177e4 1686 goto error_out;
1da177e4
LT
1687
1688 /* notification of the new policy */
e041c683
AS
1689 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1690 CPUFREQ_NOTIFY, policy);
1da177e4 1691
7d5e350f
DJ
1692 data->min = policy->min;
1693 data->max = policy->max;
1da177e4 1694
2d06d8c4 1695 pr_debug("new min and max freqs are %u - %u kHz\n",
e08f5f5b 1696 data->min, data->max);
1da177e4
LT
1697
1698 if (cpufreq_driver->setpolicy) {
1699 data->policy = policy->policy;
2d06d8c4 1700 pr_debug("setting range\n");
1da177e4
LT
1701 ret = cpufreq_driver->setpolicy(policy);
1702 } else {
1703 if (policy->governor != data->governor) {
1704 /* save old, working values */
1705 struct cpufreq_governor *old_gov = data->governor;
1706
2d06d8c4 1707 pr_debug("governor switch\n");
1da177e4
LT
1708
1709 /* end old governor */
7bd353a9 1710 if (data->governor) {
1da177e4 1711 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
7bd353a9
VK
1712 __cpufreq_governor(data,
1713 CPUFREQ_GOV_POLICY_EXIT);
1714 }
1da177e4
LT
1715
1716 /* start new governor */
1717 data->governor = policy->governor;
7bd353a9
VK
1718 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1719 if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
1720 failed = 0;
1721 else
1722 __cpufreq_governor(data,
1723 CPUFREQ_GOV_POLICY_EXIT);
1724 }
1725
1726 if (failed) {
1da177e4 1727 /* new governor failed, so re-start old one */
2d06d8c4 1728 pr_debug("starting governor %s failed\n",
e08f5f5b 1729 data->governor->name);
1da177e4
LT
1730 if (old_gov) {
1731 data->governor = old_gov;
7bd353a9
VK
1732 __cpufreq_governor(data,
1733 CPUFREQ_GOV_POLICY_INIT);
e08f5f5b
GS
1734 __cpufreq_governor(data,
1735 CPUFREQ_GOV_START);
1da177e4
LT
1736 }
1737 ret = -EINVAL;
1738 goto error_out;
1739 }
1740 /* might be a policy change, too, so fall through */
1741 }
2d06d8c4 1742 pr_debug("governor: change or update limits\n");
1da177e4
LT
1743 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1744 }
1745
7d5e350f 1746error_out:
1da177e4
LT
1747 return ret;
1748}
1749
1da177e4
LT
1750/**
1751 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1752 * @cpu: CPU which shall be re-evaluated
1753 *
25985edc 1754 * Useful for policy notifiers which have different necessities
1da177e4
LT
1755 * at different times.
1756 */
1757int cpufreq_update_policy(unsigned int cpu)
1758{
1759 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1760 struct cpufreq_policy policy;
f1829e4a 1761 int ret;
1da177e4 1762
f1829e4a
JL
1763 if (!data) {
1764 ret = -ENODEV;
1765 goto no_policy;
1766 }
1da177e4 1767
f1829e4a
JL
1768 if (unlikely(lock_policy_rwsem_write(cpu))) {
1769 ret = -EINVAL;
1770 goto fail;
1771 }
1da177e4 1772
2d06d8c4 1773 pr_debug("updating policy for CPU %u\n", cpu);
7d5e350f 1774 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1775 policy.min = data->user_policy.min;
1776 policy.max = data->user_policy.max;
1777 policy.policy = data->user_policy.policy;
1778 policy.governor = data->user_policy.governor;
1779
0961dd0d
TR
1780 /* BIOS might change freq behind our back
1781 -> ask driver for current freq and notify governors about a change */
1782 if (cpufreq_driver->get) {
1783 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3 1784 if (!data->cur) {
2d06d8c4 1785 pr_debug("Driver did not initialize current freq");
a85f7bd3
TR
1786 data->cur = policy.cur;
1787 } else {
f6b0515b 1788 if (data->cur != policy.cur && cpufreq_driver->target)
e08f5f5b
GS
1789 cpufreq_out_of_sync(cpu, data->cur,
1790 policy.cur);
a85f7bd3 1791 }
0961dd0d
TR
1792 }
1793
1da177e4
LT
1794 ret = __cpufreq_set_policy(data, &policy);
1795
5a01f2e8
VP
1796 unlock_policy_rwsem_write(cpu);
1797
f1829e4a 1798fail:
1da177e4 1799 cpufreq_cpu_put(data);
f1829e4a 1800no_policy:
1da177e4
LT
1801 return ret;
1802}
1803EXPORT_SYMBOL(cpufreq_update_policy);
1804
dd184a01 1805static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1806 unsigned long action, void *hcpu)
1807{
1808 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1809 struct device *dev;
c32b6b8e 1810
8a25a2fd
KS
1811 dev = get_cpu_device(cpu);
1812 if (dev) {
c32b6b8e
AR
1813 switch (action) {
1814 case CPU_ONLINE:
8bb78442 1815 case CPU_ONLINE_FROZEN:
8a25a2fd 1816 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1817 break;
1818 case CPU_DOWN_PREPARE:
8bb78442 1819 case CPU_DOWN_PREPARE_FROZEN:
8a25a2fd 1820 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1821 break;
5a01f2e8 1822 case CPU_DOWN_FAILED:
8bb78442 1823 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1824 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1825 break;
1826 }
1827 }
1828 return NOTIFY_OK;
1829}
1830
9c36f746 1831static struct notifier_block __refdata cpufreq_cpu_notifier = {
c32b6b8e
AR
1832 .notifier_call = cpufreq_cpu_callback,
1833};
1da177e4
LT
1834
1835/*********************************************************************
1836 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1837 *********************************************************************/
1838
1839/**
1840 * cpufreq_register_driver - register a CPU Frequency driver
1841 * @driver_data: A struct cpufreq_driver containing the values#
1842 * submitted by the CPU Frequency driver.
1843 *
32ee8c3e 1844 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1845 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1846 * (and isn't unregistered in the meantime).
1da177e4
LT
1847 *
1848 */
221dee28 1849int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
1850{
1851 unsigned long flags;
1852 int ret;
1853
a7b422cd
KRW
1854 if (cpufreq_disabled())
1855 return -ENODEV;
1856
1da177e4
LT
1857 if (!driver_data || !driver_data->verify || !driver_data->init ||
1858 ((!driver_data->setpolicy) && (!driver_data->target)))
1859 return -EINVAL;
1860
2d06d8c4 1861 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
1862
1863 if (driver_data->setpolicy)
1864 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1865
0d1857a1 1866 write_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 1867 if (cpufreq_driver) {
0d1857a1 1868 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1869 return -EBUSY;
1870 }
1871 cpufreq_driver = driver_data;
0d1857a1 1872 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1873
8a25a2fd 1874 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
1875 if (ret)
1876 goto err_null_driver;
1da177e4 1877
8f5bc2ab 1878 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
1879 int i;
1880 ret = -ENODEV;
1881
1882 /* check for at least one working CPU */
7a6aedfa
MT
1883 for (i = 0; i < nr_cpu_ids; i++)
1884 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 1885 ret = 0;
7a6aedfa
MT
1886 break;
1887 }
1da177e4
LT
1888
1889 /* if all ->init() calls failed, unregister */
1890 if (ret) {
2d06d8c4 1891 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 1892 driver_data->name);
8a25a2fd 1893 goto err_if_unreg;
1da177e4
LT
1894 }
1895 }
1896
8f5bc2ab 1897 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 1898 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 1899
8f5bc2ab 1900 return 0;
8a25a2fd
KS
1901err_if_unreg:
1902 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab 1903err_null_driver:
0d1857a1 1904 write_lock_irqsave(&cpufreq_driver_lock, flags);
8f5bc2ab 1905 cpufreq_driver = NULL;
0d1857a1 1906 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 1907 return ret;
1da177e4
LT
1908}
1909EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1910
1911
1912/**
1913 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1914 *
32ee8c3e 1915 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
1916 * the right to do so, i.e. if you have succeeded in initialising before!
1917 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1918 * currently not initialised.
1919 */
221dee28 1920int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
1921{
1922 unsigned long flags;
1923
2d06d8c4 1924 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 1925 return -EINVAL;
1da177e4 1926
2d06d8c4 1927 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 1928
8a25a2fd 1929 subsys_interface_unregister(&cpufreq_interface);
65edc68c 1930 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 1931
0d1857a1 1932 write_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 1933 cpufreq_driver = NULL;
0d1857a1 1934 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1935
1936 return 0;
1937}
1938EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1939
1940static int __init cpufreq_core_init(void)
1941{
1942 int cpu;
1943
a7b422cd
KRW
1944 if (cpufreq_disabled())
1945 return -ENODEV;
1946
5a01f2e8 1947 for_each_possible_cpu(cpu) {
f1625066 1948 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
1949 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1950 }
8aa84ad8 1951
8a25a2fd 1952 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 1953 BUG_ON(!cpufreq_global_kobject);
e00e56df 1954 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 1955
5a01f2e8
VP
1956 return 0;
1957}
5a01f2e8 1958core_initcall(cpufreq_core_init);