[CPUFREQ] make internal cpufreq_add_dev_* static
[linux-2.6-block.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
1da177e4
LT
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/notifier.h>
22#include <linux/cpufreq.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/spinlock.h>
26#include <linux/device.h>
27#include <linux/slab.h>
28#include <linux/cpu.h>
29#include <linux/completion.h>
3fc54d37 30#include <linux/mutex.h>
1da177e4 31
e08f5f5b
GS
32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
33 "cpufreq-core", msg)
1da177e4
LT
34
35/**
cd878479 36 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
37 * level driver of CPUFreq support, and its spinlock. This lock
38 * also protects the cpufreq_cpu_data array.
39 */
7d5e350f 40static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
42#ifdef CONFIG_HOTPLUG_CPU
43/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 44static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 45#endif
1da177e4
LT
46static DEFINE_SPINLOCK(cpufreq_driver_lock);
47
5a01f2e8
VP
48/*
49 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
50 * all cpufreq/hotplug/workqueue/etc related lock issues.
51 *
52 * The rules for this semaphore:
53 * - Any routine that wants to read from the policy structure will
54 * do a down_read on this semaphore.
55 * - Any routine that will write to the policy structure and/or may take away
56 * the policy altogether (eg. CPU hotplug), will hold this lock in write
57 * mode before doing so.
58 *
59 * Additional rules:
60 * - All holders of the lock should check to make sure that the CPU they
61 * are concerned with are online after they get the lock.
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8
VP
66 */
67static DEFINE_PER_CPU(int, policy_cpu);
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
/*
 * lock_policy_rwsem - generate lock_policy_rwsem_read()/_write().
 *
 * Resolves the policy-owning CPU for @cpu (CPUs sharing a policy share
 * one rwsem), takes that CPU's semaphore in the requested mode, then
 * re-checks that @cpu is still online.  Returns 0 with the lock held,
 * or -1 with the lock already released if the CPU went offline while
 * we were blocked on the semaphore.  BUG_ON fires if the per-CPU
 * policy_cpu bookkeeping was never initialized (-1 sentinel).
 */
#define lock_policy_rwsem(mode, cpu)					\
int lock_policy_rwsem_##mode						\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(policy_cpu, cpu);			\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);

lock_policy_rwsem(write, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
90
/* Release the policy rwsem taken for reading; must pair with a
 * successful lock_policy_rwsem_read() on the same @cpu. */
void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
98
/* Release the policy rwsem taken for writing; must pair with a
 * successful lock_policy_rwsem_write() on the same @cpu. */
void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
106
107
1da177e4 108/* internal prototypes */
29464f28
DJ
109static int __cpufreq_governor(struct cpufreq_policy *policy,
110 unsigned int event);
5a01f2e8 111static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 112static void handle_update(struct work_struct *work);
1da177e4
LT
113
114/**
32ee8c3e
DJ
115 * Two notifier lists: the "policy" list is involved in the
116 * validation process for a new CPU frequency policy; the
1da177e4
LT
117 * "transition" list for kernel code that needs to handle
118 * changes to devices when the CPU clock speed changes.
119 * The mutex locks both lists.
120 */
e041c683 121static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 122static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 123
/* Set once the SRCU notifier head below is usable; presumably checked by
 * the notifier registration path elsewhere in this file — confirm. */
static bool init_cpufreq_transition_notifier_list_called;
/* One-time init of the transition notifier head.  Runs as a
 * pure_initcall so it precedes any driver/notifier registration. */
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4
LT
132
133static LIST_HEAD(cpufreq_governor_list);
29464f28 134static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 135
/**
 * cpufreq_cpu_get - acquire a reference to a CPU's cpufreq policy.
 * @cpu: CPU number.
 *
 * Takes a module reference on the cpufreq driver and a kobject
 * reference on the policy, both under cpufreq_driver_lock so the
 * driver cannot be unregistered in between.  Returns the policy
 * pointer, or NULL if @cpu is out of range, no driver is registered,
 * or no policy exists for @cpu.  Every success must be balanced by
 * cpufreq_cpu_put().
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
174
7d5e350f 175
1da177e4
LT
/* Drop the kobject and driver-module references taken by
 * cpufreq_cpu_get(). */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
182
183
184/*********************************************************************
185 * UNIFIED DEBUG HELPERS *
186 *********************************************************************/
187#ifdef CONFIG_CPU_FREQ_DEBUG
188
189/* what part(s) of the CPUfreq subsystem are debugged? */
190static unsigned int debug;
191
192/* is the debug output ratelimit'ed using printk_ratelimit? User can
193 * set or modify this value.
194 */
195static unsigned int debug_ratelimit = 1;
196
197/* is the printk_ratelimit'ing enabled? It's enabled after a successful
198 * loading of a cpufreq driver, temporarily disabled when a new policy
199 * is set, and disabled upon cpufreq driver removal
200 */
201static unsigned int disable_ratelimit = 1;
202static DEFINE_SPINLOCK(disable_ratelimit_lock);
203
/* Decrement the ratelimit-disable depth counter; debug output becomes
 * ratelimited again once it reaches zero. */
static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}
213
/* Increment the ratelimit-disable depth counter (nests with the enable
 * side above) so debug output is never dropped during this window. */
static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}
222
e08f5f5b 223void cpufreq_debug_printk(unsigned int type, const char *prefix,
905d77cd 224 const char *fmt, ...)
1da177e4
LT
225{
226 char s[256];
227 va_list args;
228 unsigned int len;
229 unsigned long flags;
32ee8c3e 230
1da177e4
LT
231 WARN_ON(!prefix);
232 if (type & debug) {
233 spin_lock_irqsave(&disable_ratelimit_lock, flags);
e08f5f5b
GS
234 if (!disable_ratelimit && debug_ratelimit
235 && !printk_ratelimit()) {
1da177e4
LT
236 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
237 return;
238 }
239 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
240
241 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
242
243 va_start(args, fmt);
244 len += vsnprintf(&s[len], (256 - len), fmt, args);
245 va_end(args);
246
247 printk(s);
248
249 WARN_ON(len < 5);
250 }
251}
252EXPORT_SYMBOL(cpufreq_debug_printk);
253
254
255module_param(debug, uint, 0644);
e08f5f5b
GS
256MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
257 " 2 to debug drivers, and 4 to debug governors.");
1da177e4
LT
258
259module_param(debug_ratelimit, uint, 0644);
e08f5f5b
GS
260MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
261 " set to 0 to disable ratelimiting.");
1da177e4
LT
262
263#else /* !CONFIG_CPU_FREQ_DEBUG */
264
265static inline void cpufreq_debug_enable_ratelimit(void) { return; }
266static inline void cpufreq_debug_disable_ratelimit(void) { return; }
267
268#endif /* CONFIG_CPU_FREQ_DEBUG */
269
270
271/*********************************************************************
272 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
273 *********************************************************************/
274
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
/* Reference lpj/frequency pair captured at the first transition; all
 * later scaling is computed from this baseline, not incrementally. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Drivers with constant-loop TSCs need no adjustment at all. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Scale before speeding up, after slowing down, and on any
	 * suspend/resume change, so udelay() never under-waits. */
	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
307#else
e08f5f5b
GS
308static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
309{
310 return;
311}
1da177e4
LT
312#endif
313
314
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 * @freqs: transition descriptor (cpu, old and new frequency in kHz).
 * @state: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects: once before (PRECHANGE) and once after (POSTCHANGE)
 * the hardware switch.  Must be called from sleepable context
 * (BUG_ON(irqs_disabled())) because the SRCU chain may sleep.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".  If so, trust the core's view so the
		 * notified delta is consistent.
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* Record the frequency the hardware actually reached. */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
365
366
367
368/*********************************************************************
369 * SYSFS INTERFACE *
370 *********************************************************************/
371
3bcb09a3
JF
/* Look up a registered governor by name (case-insensitive, bounded by
 * CPUFREQ_NAME_LEN); returns NULL if absent.  Callers are expected to
 * hold cpufreq_governor_mutex while the returned pointer is in use. */
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
382
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 * @str_governor: name supplied by the user ("performance", "ondemand", ...).
 * @policy:       out-param for CPUFREQ_POLICY_* when the driver uses
 *                setpolicy-style (hardware-managed) governors.
 * @governor:     out-param for the governor struct when the driver uses
 *                target-style (software) governors.
 *
 * Exactly one of @policy / @governor is filled in, depending on whether
 * the registered driver implements ->setpolicy or ->target.  For target
 * drivers an unknown name triggers a request_module("cpufreq_<name>")
 * autoload attempt (with the governor mutex dropped around the sleep)
 * before the lookup is retried.  Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
								str_governor);

			if (name) {
				int ret;

				/* request_module() may sleep; drop the
				 * mutex so governor registration from the
				 * freshly loaded module can proceed. */
				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module("%s", name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
438
439
1da177e4 440/**
e08f5f5b
GS
441 * cpufreq_per_cpu_attr_read() / show_##file_name() -
442 * print out cpufreq information
1da177e4
LT
443 *
444 * Write out information from cpufreq_driver->policy[cpu]; object must be
445 * "unsigned int".
446 */
447
32ee8c3e
DJ
/* Generates show_<file_name>(): prints the named policy member as a
 * decimal "%u" line for sysfs.  One instantiation per read-only
 * frequency attribute below. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
461
e08f5f5b
GS
462static int __cpufreq_set_policy(struct cpufreq_policy *data,
463 struct cpufreq_policy *policy);
7970e08b 464
1da177e4
LT
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Generates store_<file_name>(): parses a "%u" from userspace, copies
 * the current policy, overwrites the one member, and applies the result
 * via __cpufreq_set_policy(); user_policy is updated to the value that
 * actually took effect.  NOTE(review): ret is declared unsigned int yet
 * carries negative error codes into a ssize_t return -- works, but int
 * would be cleaner.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
491
492/**
493 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
494 */
905d77cd
DJ
495static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
496 char *buf)
1da177e4 497{
5a01f2e8 498 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
499 if (!cur_freq)
500 return sprintf(buf, "<unknown>");
501 return sprintf(buf, "%u\n", cur_freq);
502}
503
504
/**
 * show_scaling_governor - show the current policy for the specified CPU
 *
 * For setpolicy drivers prints the fixed policy name; for target
 * drivers prints the attached governor's name.  -EINVAL if neither
 * a recognized policy nor a governor is set.
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
519
520
/**
 * store_scaling_governor - store policy for the specified CPU
 *
 * Parses a governor name (max 15 chars via "%15s") and applies it
 * through __cpufreq_set_policy().  On success the user_policy fields
 * are synced to what actually took effect.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
555
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 *
 * Prints the registered driver's name, bounded by CPUFREQ_NAME_LEN.
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}
563
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 *
 * setpolicy drivers always offer exactly "performance powersave";
 * target drivers list every registered governor, stopping early if a
 * further name (plus separator) could overflow the sysfs page.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
				- (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 588
/* Format a cpumask as a space-separated list of CPU numbers ending in
 * a newline.  Stops once fewer than 5 bytes of the page remain so the
 * trailing separator + newline always fit. */
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
604
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 *
 * Falls back to the online-affected mask when the driver never filled
 * in related_cpus.
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	if (cpumask_empty(policy->related_cpus))
		return show_cpus(policy->cpus, buf);
	return show_cpus(policy->related_cpus, buf);
}
615
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
623
/* Forward a user-requested frequency ("%u" kHz) to the governor's
 * store_setspeed hook (userspace governor); -EINVAL if the current
 * governor has no such hook or the input does not parse. */
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
641
/* Report the governor's set speed via its show_setspeed hook, or
 * "<unsupported>" when the current governor has none. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4
LT
649
650#define define_one_ro(_name) \
651static struct freq_attr _name = \
652__ATTR(_name, 0444, show_##_name, NULL)
653
654#define define_one_ro0400(_name) \
655static struct freq_attr _name = \
656__ATTR(_name, 0400, show_##_name, NULL)
657
658#define define_one_rw(_name) \
659static struct freq_attr _name = \
660__ATTR(_name, 0644, show_##_name, store_##_name)
661
662define_one_ro0400(cpuinfo_cur_freq);
663define_one_ro(cpuinfo_min_freq);
664define_one_ro(cpuinfo_max_freq);
ed129784 665define_one_ro(cpuinfo_transition_latency);
1da177e4
LT
666define_one_ro(scaling_available_governors);
667define_one_ro(scaling_driver);
668define_one_ro(scaling_cur_freq);
e8628dd0 669define_one_ro(related_cpus);
1da177e4
LT
670define_one_ro(affected_cpus);
671define_one_rw(scaling_min_freq);
672define_one_rw(scaling_max_freq);
673define_one_rw(scaling_governor);
9e76988e 674define_one_rw(scaling_setspeed);
1da177e4 675
905d77cd 676static struct attribute *default_attrs[] = {
1da177e4
LT
677 &cpuinfo_min_freq.attr,
678 &cpuinfo_max_freq.attr,
ed129784 679 &cpuinfo_transition_latency.attr,
1da177e4
LT
680 &scaling_min_freq.attr,
681 &scaling_max_freq.attr,
682 &affected_cpus.attr,
e8628dd0 683 &related_cpus.attr,
1da177e4
LT
684 &scaling_governor.attr,
685 &scaling_driver.attr,
686 &scaling_available_governors.attr,
9e76988e 687 &scaling_setspeed.attr,
1da177e4
LT
688 NULL
689};
690
8aa84ad8
TR
691struct kobject *cpufreq_global_kobject;
692EXPORT_SYMBOL(cpufreq_global_kobject);
693
29464f28
DJ
694#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
695#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 696
/* Generic sysfs show dispatcher: pins the policy (ref + read rwsem),
 * calls the attribute's ->show, then unwinds.  Returns -EINVAL if the
 * policy vanished or the CPU went offline while locking, -EIO if the
 * attribute has no show hook. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
720
905d77cd
DJ
/* Generic sysfs store dispatcher: mirror of show() but takes the
 * policy rwsem in write mode around the attribute's ->store hook. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
745
/* kobject release callback: wakes whoever is blocked in
 * wait_for_completion(&policy->kobj_unregister) once the last sysfs
 * reference to the policy is gone. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
752
753static struct sysfs_ops sysfs_ops = {
754 .show = show,
755 .store = store,
756};
757
758static struct kobj_type ktype_cpufreq = {
759 .sysfs_ops = &sysfs_ops,
760 .default_attrs = default_attrs,
761 .release = cpufreq_sysfs_release,
762};
763
4bfa042c
TR
/*
 * Returns:
 *   Negative: Failure
 *   0:        Success
 *   Positive: When we have a managed CPU and the sysfs got symlinked
 *
 * On SMP, checks whether another online CPU already manages a policy
 * covering this CPU; if so, this CPU only gets a sysfs symlink to the
 * managing CPU's kobject and the freshly ->init()ed driver state for
 * it is torn down again.  On HOTPLUG_CPU kernels it also restores the
 * governor the CPU had before it was last taken offline.
 *
 * NOTE(review): caller must hold this CPU's policy rwsem in write mode;
 * the lock is dropped and retaken here to rebind policy_cpu -- confirm
 * against cpufreq_add_dev.
 */
static int cpufreq_add_dev_policy(unsigned int cpu,
				  struct cpufreq_policy *policy,
				  struct sys_device *sys_dev)
{
	int ret = 0;
#ifdef CONFIG_SMP
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;

	/* Restore the governor this CPU used before it went offline. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		dprintk("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;

		if (cpu == j)
			continue;

		/* Check for existing affected CPUs.
		 * They may not be aware of it due to CPU Hotplug.
		 * cpufreq_cpu_put is called when the device is removed
		 * in __cpufreq_remove_dev()
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu: must drop and retake the
			 * rwsem because policy_cpu selects which sem the
			 * lock helpers operate on. */
			unlock_policy_rwsem_write(cpu);
			per_cpu(policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0) {
				/* Should not go through policy unlock path */
				if (cpufreq_driver->exit)
					cpufreq_driver->exit(policy);
				cpufreq_cpu_put(managed_policy);
				return -EBUSY;
			}

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				cpufreq_cpu_put(managed_policy);
			/*
			 * Success. We only needed to be added to the mask.
			 * Call driver->exit() because only the cpu parent of
			 * the kobj needed to call init().
			 */
			if (cpufreq_driver->exit)
				cpufreq_driver->exit(policy);

			if (!ret)
				return 1;
			else
				return ret;
		}
	}
#endif
	return ret;
}
843
844
/* symlink affected CPUs: give every other online CPU in the policy's
 * mask a "cpufreq" symlink to this policy's kobject.  Each link pins an
 * extra policy reference (cpufreq_cpu_get) that is dropped on device
 * removal.  NOTE(review): on a mid-loop failure, links already created
 * for earlier CPUs are not unwound here -- presumably the caller's
 * kobject teardown covers it; confirm. */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct sys_device *cpu_sys_dev;

		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
873
cf3289d0
AC
/* Build the sysfs interface for a new policy: create the "cpufreq"
 * kobject under the CPU's sysdev, populate driver/core attribute
 * files, publish the policy in the per-CPU tables, symlink sibling
 * CPUs, and finally apply the default policy via
 * __cpufreq_set_policy().  On sysfs failure the half-built kobject is
 * put and its release completion awaited before returning. */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct sys_device *sys_dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &sys_dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only makes sense if the hardware is readable */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for every online CPU it covers */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		dprintk("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
943
1da177e4
LT
944
945/**
946 * cpufreq_add_dev - add a CPU device
947 *
32ee8c3e 948 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
949 *
950 * The Oracle says: try running cpufreq registration/unregistration concurrently
951 * with with cpu hotplugging and all hell will break loose. Tried to clean this
952 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 953 */
905d77cd 954static int cpufreq_add_dev(struct sys_device *sys_dev)
1da177e4
LT
955{
956 unsigned int cpu = sys_dev->id;
90e41bac 957 int ret = 0, found = 0;
1da177e4 958 struct cpufreq_policy *policy;
1da177e4
LT
959 unsigned long flags;
960 unsigned int j;
90e41bac
PB
961#ifdef CONFIG_HOTPLUG_CPU
962 int sibling;
963#endif
1da177e4 964
c32b6b8e
AR
965 if (cpu_is_offline(cpu))
966 return 0;
967
1da177e4
LT
968 cpufreq_debug_disable_ratelimit();
969 dprintk("adding CPU %u\n", cpu);
970
971#ifdef CONFIG_SMP
972 /* check whether a different CPU already registered this
973 * CPU because it is in the same boat. */
974 policy = cpufreq_cpu_get(cpu);
975 if (unlikely(policy)) {
8ff69732 976 cpufreq_cpu_put(policy);
1da177e4
LT
977 cpufreq_debug_enable_ratelimit();
978 return 0;
979 }
980#endif
981
982 if (!try_module_get(cpufreq_driver->owner)) {
983 ret = -EINVAL;
984 goto module_out;
985 }
986
059019a3 987 ret = -ENOMEM;
e98df50c 988 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 989 if (!policy)
1da177e4 990 goto nomem_out;
059019a3
DJ
991
992 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 993 goto err_free_policy;
059019a3
DJ
994
995 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 996 goto err_free_cpumask;
1da177e4
LT
997
998 policy->cpu = cpu;
835481d9 999 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1000
5a01f2e8
VP
1001 /* Initially set CPU itself as the policy_cpu */
1002 per_cpu(policy_cpu, cpu) = cpu;
3f4a782b
MD
1003 ret = (lock_policy_rwsem_write(cpu) < 0);
1004 WARN_ON(ret);
5a01f2e8 1005
1da177e4 1006 init_completion(&policy->kobj_unregister);
65f27f38 1007 INIT_WORK(&policy->update, handle_update);
1da177e4 1008
8122c6ce 1009 /* Set governor before ->init, so that driver could check it */
90e41bac
PB
1010#ifdef CONFIG_HOTPLUG_CPU
1011 for_each_online_cpu(sibling) {
1012 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
1013 if (cp && cp->governor &&
1014 (cpumask_test_cpu(cpu, cp->related_cpus))) {
1015 policy->governor = cp->governor;
1016 found = 1;
1017 break;
1018 }
1019 }
1020#endif
1021 if (!found)
1022 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1da177e4
LT
1023 /* call driver. From then on the cpufreq must be able
1024 * to accept all calls to ->verify and ->setpolicy for this CPU
1025 */
1026 ret = cpufreq_driver->init(policy);
1027 if (ret) {
1028 dprintk("initialization failed\n");
3f4a782b 1029 goto err_unlock_policy;
1da177e4 1030 }
187d9f4e
MC
1031 policy->user_policy.min = policy->min;
1032 policy->user_policy.max = policy->max;
1da177e4 1033
a1531acd
TR
1034 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1035 CPUFREQ_START, policy);
1036
ecf7e461 1037 ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
4bfa042c
TR
1038 if (ret) {
1039 if (ret > 0)
1040 /* This is a managed cpu, symlink created,
1041 exit with 0 */
1042 ret = 0;
ecf7e461 1043 goto err_unlock_policy;
4bfa042c 1044 }
1da177e4 1045
909a694e 1046 ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
19d6f7ec
DJ
1047 if (ret)
1048 goto err_out_unregister;
8ff69732 1049
dca02613
LW
1050 unlock_policy_rwsem_write(cpu);
1051
038c5b3e 1052 kobject_uevent(&policy->kobj, KOBJ_ADD);
1da177e4 1053 module_put(cpufreq_driver->owner);
1da177e4
LT
1054 dprintk("initialization complete\n");
1055 cpufreq_debug_enable_ratelimit();
87c32271 1056
1da177e4
LT
1057 return 0;
1058
1059
1060err_out_unregister:
1061 spin_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 1062 for_each_cpu(j, policy->cpus)
7a6aedfa 1063 per_cpu(cpufreq_cpu_data, j) = NULL;
1da177e4
LT
1064 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1065
c10997f6 1066 kobject_put(&policy->kobj);
1da177e4
LT
1067 wait_for_completion(&policy->kobj_unregister);
1068
3f4a782b 1069err_unlock_policy:
45709118 1070 unlock_policy_rwsem_write(cpu);
3f4a782b
MD
1071err_free_cpumask:
1072 free_cpumask_var(policy->cpus);
1073err_free_policy:
1da177e4 1074 kfree(policy);
1da177e4
LT
1075nomem_out:
1076 module_put(cpufreq_driver->owner);
c32b6b8e 1077module_out:
1da177e4
LT
1078 cpufreq_debug_enable_ratelimit();
1079 return ret;
1080}
1081
1082
1083/**
5a01f2e8 1084 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1085 *
1086 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1087 * Caller should already have policy_rwsem in write mode for this CPU.
1088 * This routine frees the rwsem before returning.
1da177e4 1089 */
905d77cd 1090static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1da177e4
LT
1091{
1092 unsigned int cpu = sys_dev->id;
1093 unsigned long flags;
1094 struct cpufreq_policy *data;
1095#ifdef CONFIG_SMP
e738cf6d 1096 struct sys_device *cpu_sys_dev;
1da177e4
LT
1097 unsigned int j;
1098#endif
1099
1100 cpufreq_debug_disable_ratelimit();
1101 dprintk("unregistering CPU %u\n", cpu);
1102
1103 spin_lock_irqsave(&cpufreq_driver_lock, flags);
7a6aedfa 1104 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
1105
1106 if (!data) {
1107 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1108 cpufreq_debug_enable_ratelimit();
5a01f2e8 1109 unlock_policy_rwsem_write(cpu);
1da177e4
LT
1110 return -EINVAL;
1111 }
7a6aedfa 1112 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1da177e4
LT
1113
1114
1115#ifdef CONFIG_SMP
1116 /* if this isn't the CPU which is the parent of the kobj, we
32ee8c3e 1117 * only need to unlink, put and exit
1da177e4
LT
1118 */
1119 if (unlikely(cpu != data->cpu)) {
1120 dprintk("removing link\n");
835481d9 1121 cpumask_clear_cpu(cpu, data->cpus);
1da177e4
LT
1122 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1123 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
1da177e4
LT
1124 cpufreq_cpu_put(data);
1125 cpufreq_debug_enable_ratelimit();
5a01f2e8 1126 unlock_policy_rwsem_write(cpu);
1da177e4
LT
1127 return 0;
1128 }
1129#endif
1130
1da177e4 1131#ifdef CONFIG_SMP
084f3493
TR
1132
1133#ifdef CONFIG_HOTPLUG_CPU
e77b89f1
DM
1134 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1135 CPUFREQ_NAME_LEN);
084f3493
TR
1136#endif
1137
1da177e4
LT
1138 /* if we have other CPUs still registered, we need to unlink them,
1139 * or else wait_for_completion below will lock up. Clean the
7a6aedfa
MT
1140 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1141 * the sysfs links afterwards.
1da177e4 1142 */
835481d9
RR
1143 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1144 for_each_cpu(j, data->cpus) {
1da177e4
LT
1145 if (j == cpu)
1146 continue;
7a6aedfa 1147 per_cpu(cpufreq_cpu_data, j) = NULL;
1da177e4
LT
1148 }
1149 }
1150
1151 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1152
835481d9
RR
1153 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1154 for_each_cpu(j, data->cpus) {
1da177e4
LT
1155 if (j == cpu)
1156 continue;
1157 dprintk("removing link for cpu %u\n", j);
084f3493 1158#ifdef CONFIG_HOTPLUG_CPU
e77b89f1
DM
1159 strncpy(per_cpu(cpufreq_cpu_governor, j),
1160 data->governor->name, CPUFREQ_NAME_LEN);
084f3493 1161#endif
d434fca7
AR
1162 cpu_sys_dev = get_cpu_sysdev(j);
1163 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
1da177e4
LT
1164 cpufreq_cpu_put(data);
1165 }
1166 }
1167#else
1168 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1169#endif
1170
1da177e4
LT
1171 if (cpufreq_driver->target)
1172 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 1173
1da177e4
LT
1174 kobject_put(&data->kobj);
1175
1176 /* we need to make sure that the underlying kobj is actually
32ee8c3e 1177 * not referenced anymore by anybody before we proceed with
1da177e4
LT
1178 * unloading.
1179 */
1180 dprintk("waiting for dropping of refcount\n");
1181 wait_for_completion(&data->kobj_unregister);
1182 dprintk("wait complete\n");
1183
1184 if (cpufreq_driver->exit)
1185 cpufreq_driver->exit(data);
1186
7d26e2d5 1187 unlock_policy_rwsem_write(cpu);
1188
835481d9
RR
1189 free_cpumask_var(data->related_cpus);
1190 free_cpumask_var(data->cpus);
1da177e4 1191 kfree(data);
835481d9 1192 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1da177e4
LT
1193
1194 cpufreq_debug_enable_ratelimit();
1da177e4
LT
1195 return 0;
1196}
1197
1198
905d77cd 1199static int cpufreq_remove_dev(struct sys_device *sys_dev)
5a01f2e8
VP
1200{
1201 unsigned int cpu = sys_dev->id;
1202 int retval;
ec28297a
VP
1203
1204 if (cpu_is_offline(cpu))
1205 return 0;
1206
5a01f2e8
VP
1207 if (unlikely(lock_policy_rwsem_write(cpu)))
1208 BUG();
1209
1210 retval = __cpufreq_remove_dev(sys_dev);
1211 return retval;
1212}
1213
1214
65f27f38 1215static void handle_update(struct work_struct *work)
1da177e4 1216{
65f27f38
DH
1217 struct cpufreq_policy *policy =
1218 container_of(work, struct cpufreq_policy, update);
1219 unsigned int cpu = policy->cpu;
1da177e4
LT
1220 dprintk("handle_update for cpu %u called\n", cpu);
1221 cpufreq_update_policy(cpu);
1222}
1223
1224/**
1225 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1226 * @cpu: cpu number
1227 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1228 * @new_freq: CPU frequency the CPU actually runs at
1229 *
29464f28
DJ
1230 * We adjust to current frequency first, and need to clean up later.
1231 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1232 */
e08f5f5b
GS
1233static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1234 unsigned int new_freq)
1da177e4
LT
1235{
1236 struct cpufreq_freqs freqs;
1237
b10eec22 1238 dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1239 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1240
1241 freqs.cpu = cpu;
1242 freqs.old = old_freq;
1243 freqs.new = new_freq;
1244 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1245 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1246}
1247
1248
32ee8c3e 1249/**
4ab70df4 1250 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1251 * @cpu: CPU number
1252 *
1253 * This is the last known freq, without actually getting it from the driver.
1254 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1255 */
1256unsigned int cpufreq_quick_get(unsigned int cpu)
1257{
1258 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1259 unsigned int ret_freq = 0;
95235ca2
VP
1260
1261 if (policy) {
e08f5f5b 1262 ret_freq = policy->cur;
95235ca2
VP
1263 cpufreq_cpu_put(policy);
1264 }
1265
4d34a67d 1266 return ret_freq;
95235ca2
VP
1267}
1268EXPORT_SYMBOL(cpufreq_quick_get);
1269
1270
5a01f2e8 1271static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1272{
7a6aedfa 1273 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1274 unsigned int ret_freq = 0;
1da177e4 1275
1da177e4 1276 if (!cpufreq_driver->get)
4d34a67d 1277 return ret_freq;
1da177e4 1278
e08f5f5b 1279 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1280
e08f5f5b
GS
1281 if (ret_freq && policy->cur &&
1282 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1283 /* verify no discrepancy between actual and
1284 saved value exists */
1285 if (unlikely(ret_freq != policy->cur)) {
1286 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1287 schedule_work(&policy->update);
1288 }
1289 }
1290
4d34a67d 1291 return ret_freq;
5a01f2e8 1292}
1da177e4 1293
5a01f2e8
VP
1294/**
1295 * cpufreq_get - get the current CPU frequency (in kHz)
1296 * @cpu: CPU number
1297 *
1298 * Get the CPU current (static) CPU frequency
1299 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1321
1322
42d4dc3f
BH
1323/**
1324 * cpufreq_suspend - let the low level driver prepare for suspend
1325 */
1326
905d77cd 1327static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
42d4dc3f 1328{
e08f5f5b 1329 int ret = 0;
4bc5d341 1330
4bc5d341 1331 int cpu = sysdev->id;
42d4dc3f
BH
1332 struct cpufreq_policy *cpu_policy;
1333
0e37b159 1334 dprintk("suspending cpu %u\n", cpu);
42d4dc3f
BH
1335
1336 if (!cpu_online(cpu))
1337 return 0;
1338
1339 /* we may be lax here as interrupts are off. Nonetheless
1340 * we need to grab the correct cpu policy, as to check
1341 * whether we really run on this CPU.
1342 */
1343
1344 cpu_policy = cpufreq_cpu_get(cpu);
1345 if (!cpu_policy)
1346 return -EINVAL;
1347
1348 /* only handle each CPU group once */
c9060494
DJ
1349 if (unlikely(cpu_policy->cpu != cpu))
1350 goto out;
42d4dc3f
BH
1351
1352 if (cpufreq_driver->suspend) {
e00d9967 1353 ret = cpufreq_driver->suspend(cpu_policy, pmsg);
ce6c3997 1354 if (ret)
42d4dc3f
BH
1355 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1356 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1357 }
1358
7d5e350f 1359out:
42d4dc3f 1360 cpufreq_cpu_put(cpu_policy);
c9060494 1361 return ret;
42d4dc3f
BH
1362}
1363
1da177e4
LT
1364/**
1365 * cpufreq_resume - restore proper CPU frequency handling after resume
1366 *
1367 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1368 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1369 * restored. It will verify that the current freq is in sync with
1370 * what we believe it to be. This is a bit later than when it
1371 * should be, but nonethteless it's better than calling
1372 * cpufreq_driver->get() here which might re-enable interrupts...
1da177e4 1373 */
905d77cd 1374static int cpufreq_resume(struct sys_device *sysdev)
1da177e4 1375{
e08f5f5b 1376 int ret = 0;
4bc5d341 1377
4bc5d341 1378 int cpu = sysdev->id;
1da177e4
LT
1379 struct cpufreq_policy *cpu_policy;
1380
1381 dprintk("resuming cpu %u\n", cpu);
1382
1383 if (!cpu_online(cpu))
1384 return 0;
1385
1386 /* we may be lax here as interrupts are off. Nonetheless
1387 * we need to grab the correct cpu policy, as to check
1388 * whether we really run on this CPU.
1389 */
1390
1391 cpu_policy = cpufreq_cpu_get(cpu);
1392 if (!cpu_policy)
1393 return -EINVAL;
1394
1395 /* only handle each CPU group once */
c9060494
DJ
1396 if (unlikely(cpu_policy->cpu != cpu))
1397 goto fail;
1da177e4
LT
1398
1399 if (cpufreq_driver->resume) {
1400 ret = cpufreq_driver->resume(cpu_policy);
1401 if (ret) {
1402 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1403 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1404 goto fail;
1da177e4
LT
1405 }
1406 }
1407
1da177e4 1408 schedule_work(&cpu_policy->update);
ce6c3997 1409
c9060494 1410fail:
1da177e4
LT
1411 cpufreq_cpu_put(cpu_policy);
1412 return ret;
1413}
1414
1415static struct sysdev_driver cpufreq_sysdev_driver = {
1416 .add = cpufreq_add_dev,
1417 .remove = cpufreq_remove_dev,
42d4dc3f 1418 .suspend = cpufreq_suspend,
1da177e4
LT
1419 .resume = cpufreq_resume,
1420};
1421
1422
1423/*********************************************************************
1424 * NOTIFIER LISTS INTERFACE *
1425 *********************************************************************/
1426
1427/**
1428 * cpufreq_register_notifier - register a driver with cpufreq
1429 * @nb: notifier function to register
1430 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1431 *
32ee8c3e 1432 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1433 * are notified about clock rate changes (once before and once after
1434 * the transition), or a list of drivers that are notified about
1435 * changes in cpufreq policy.
1436 *
1437 * This function may sleep, and has the same return conditions as
e041c683 1438 * blocking_notifier_chain_register.
1da177e4
LT
1439 */
1440int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1441{
1442 int ret;
1443
74212ca4
CEB
1444 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1445
1da177e4
LT
1446 switch (list) {
1447 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1448 ret = srcu_notifier_chain_register(
e041c683 1449 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1450 break;
1451 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1452 ret = blocking_notifier_chain_register(
1453 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1454 break;
1455 default:
1456 ret = -EINVAL;
1457 }
1da177e4
LT
1458
1459 return ret;
1460}
1461EXPORT_SYMBOL(cpufreq_register_notifier);
1462
1463
1464/**
1465 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1466 * @nb: notifier block to be unregistered
1467 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1468 *
1469 * Remove a driver from the CPU frequency notifier list.
1470 *
1471 * This function may sleep, and has the same return conditions as
e041c683 1472 * blocking_notifier_chain_unregister.
1da177e4
LT
1473 */
1474int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1475{
1476 int ret;
1477
1da177e4
LT
1478 switch (list) {
1479 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1480 ret = srcu_notifier_chain_unregister(
e041c683 1481 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1482 break;
1483 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1484 ret = blocking_notifier_chain_unregister(
1485 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1486 break;
1487 default:
1488 ret = -EINVAL;
1489 }
1da177e4
LT
1490
1491 return ret;
1492}
1493EXPORT_SYMBOL(cpufreq_unregister_notifier);
1494
1495
1496/*********************************************************************
1497 * GOVERNORS *
1498 *********************************************************************/
1499
1500
1501int __cpufreq_driver_target(struct cpufreq_policy *policy,
1502 unsigned int target_freq,
1503 unsigned int relation)
1504{
1505 int retval = -EINVAL;
c32b6b8e 1506
1da177e4
LT
1507 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1508 target_freq, relation);
1509 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1510 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1511
1da177e4
LT
1512 return retval;
1513}
1514EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1515
1da177e4
LT
1516int cpufreq_driver_target(struct cpufreq_policy *policy,
1517 unsigned int target_freq,
1518 unsigned int relation)
1519{
f1829e4a 1520 int ret = -EINVAL;
1da177e4
LT
1521
1522 policy = cpufreq_cpu_get(policy->cpu);
1523 if (!policy)
f1829e4a 1524 goto no_policy;
1da177e4 1525
5a01f2e8 1526 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1527 goto fail;
1da177e4
LT
1528
1529 ret = __cpufreq_driver_target(policy, target_freq, relation);
1530
5a01f2e8 1531 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1532
f1829e4a 1533fail:
1da177e4 1534 cpufreq_cpu_put(policy);
f1829e4a 1535no_policy:
1da177e4
LT
1536 return ret;
1537}
1538EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1539
bf0b90e3 1540int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1541{
1542 int ret = 0;
1543
1544 policy = cpufreq_cpu_get(policy->cpu);
1545 if (!policy)
1546 return -EINVAL;
1547
bf0b90e3 1548 if (cpu_online(cpu) && cpufreq_driver->getavg)
1549 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1550
dfde5d62
VP
1551 cpufreq_cpu_put(policy);
1552 return ret;
1553}
5a01f2e8 1554EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1555
153d7f3f 1556/*
153d7f3f
AV
1557 * when "event" is CPUFREQ_GOV_LIMITS
1558 */
1da177e4 1559
e08f5f5b
GS
1560static int __cpufreq_governor(struct cpufreq_policy *policy,
1561 unsigned int event)
1da177e4 1562{
cc993cab 1563 int ret;
6afde10c
TR
1564
1565 /* Only must be defined when default governor is known to have latency
1566 restrictions, like e.g. conservative or ondemand.
1567 That this is the case is already ensured in Kconfig
1568 */
1569#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1570 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1571#else
1572 struct cpufreq_governor *gov = NULL;
1573#endif
1c256245
TR
1574
1575 if (policy->governor->max_transition_latency &&
1576 policy->cpuinfo.transition_latency >
1577 policy->governor->max_transition_latency) {
6afde10c
TR
1578 if (!gov)
1579 return -EINVAL;
1580 else {
1581 printk(KERN_WARNING "%s governor failed, too long"
1582 " transition latency of HW, fallback"
1583 " to %s governor\n",
1584 policy->governor->name,
1585 gov->name);
1586 policy->governor = gov;
1587 }
1c256245 1588 }
1da177e4
LT
1589
1590 if (!try_module_get(policy->governor->owner))
1591 return -EINVAL;
1592
e08f5f5b
GS
1593 dprintk("__cpufreq_governor for CPU %u, event %u\n",
1594 policy->cpu, event);
1da177e4
LT
1595 ret = policy->governor->governor(policy, event);
1596
e08f5f5b
GS
1597 /* we keep one module reference alive for
1598 each CPU governed by this CPU */
1da177e4
LT
1599 if ((event != CPUFREQ_GOV_START) || ret)
1600 module_put(policy->governor->owner);
1601 if ((event == CPUFREQ_GOV_STOP) && !ret)
1602 module_put(policy->governor->owner);
1603
1604 return ret;
1605}
1606
1607
1da177e4
LT
1608int cpufreq_register_governor(struct cpufreq_governor *governor)
1609{
3bcb09a3 1610 int err;
1da177e4
LT
1611
1612 if (!governor)
1613 return -EINVAL;
1614
3fc54d37 1615 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1616
3bcb09a3
JF
1617 err = -EBUSY;
1618 if (__find_governor(governor->name) == NULL) {
1619 err = 0;
1620 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1621 }
1da177e4 1622
32ee8c3e 1623 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1624 return err;
1da177e4
LT
1625}
1626EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1627
1628
1629void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1630{
90e41bac
PB
1631#ifdef CONFIG_HOTPLUG_CPU
1632 int cpu;
1633#endif
1634
1da177e4
LT
1635 if (!governor)
1636 return;
1637
90e41bac
PB
1638#ifdef CONFIG_HOTPLUG_CPU
1639 for_each_present_cpu(cpu) {
1640 if (cpu_online(cpu))
1641 continue;
1642 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1643 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1644 }
1645#endif
1646
3fc54d37 1647 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1648 list_del(&governor->governor_list);
3fc54d37 1649 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1650 return;
1651}
1652EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1653
1654
1655
1656/*********************************************************************
1657 * POLICY INTERFACE *
1658 *********************************************************************/
1659
1660/**
1661 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1662 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1663 * is written
1da177e4
LT
1664 *
1665 * Reads the current cpufreq policy.
1666 */
1667int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1668{
1669 struct cpufreq_policy *cpu_policy;
1670 if (!policy)
1671 return -EINVAL;
1672
1673 cpu_policy = cpufreq_cpu_get(cpu);
1674 if (!cpu_policy)
1675 return -EINVAL;
1676
1da177e4 1677 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1678
1679 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1680 return 0;
1681}
1682EXPORT_SYMBOL(cpufreq_get_policy);
1683
1684
153d7f3f 1685/*
e08f5f5b
GS
1686 * data : current policy.
1687 * policy : policy to be set.
153d7f3f 1688 */
e08f5f5b
GS
1689static int __cpufreq_set_policy(struct cpufreq_policy *data,
1690 struct cpufreq_policy *policy)
1da177e4
LT
1691{
1692 int ret = 0;
1693
1694 cpufreq_debug_disable_ratelimit();
1695 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1696 policy->min, policy->max);
1697
e08f5f5b
GS
1698 memcpy(&policy->cpuinfo, &data->cpuinfo,
1699 sizeof(struct cpufreq_cpuinfo));
1da177e4 1700
53391fa2 1701 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1702 ret = -EINVAL;
1703 goto error_out;
1704 }
1705
1da177e4
LT
1706 /* verify the cpu speed can be set within this limit */
1707 ret = cpufreq_driver->verify(policy);
1708 if (ret)
1709 goto error_out;
1710
1da177e4 1711 /* adjust if necessary - all reasons */
e041c683
AS
1712 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1713 CPUFREQ_ADJUST, policy);
1da177e4
LT
1714
1715 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1716 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1717 CPUFREQ_INCOMPATIBLE, policy);
1da177e4
LT
1718
1719 /* verify the cpu speed can be set within this limit,
1720 which might be different to the first one */
1721 ret = cpufreq_driver->verify(policy);
e041c683 1722 if (ret)
1da177e4 1723 goto error_out;
1da177e4
LT
1724
1725 /* notification of the new policy */
e041c683
AS
1726 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1727 CPUFREQ_NOTIFY, policy);
1da177e4 1728
7d5e350f
DJ
1729 data->min = policy->min;
1730 data->max = policy->max;
1da177e4 1731
e08f5f5b
GS
1732 dprintk("new min and max freqs are %u - %u kHz\n",
1733 data->min, data->max);
1da177e4
LT
1734
1735 if (cpufreq_driver->setpolicy) {
1736 data->policy = policy->policy;
1737 dprintk("setting range\n");
1738 ret = cpufreq_driver->setpolicy(policy);
1739 } else {
1740 if (policy->governor != data->governor) {
1741 /* save old, working values */
1742 struct cpufreq_governor *old_gov = data->governor;
1743
1744 dprintk("governor switch\n");
1745
1746 /* end old governor */
395913d0
MD
1747 if (data->governor) {
1748 /*
1749 * Need to release the rwsem around governor
1750 * stop due to lock dependency between
1751 * cancel_delayed_work_sync and the read lock
1752 * taken in the delayed work handler.
1753 */
1754 unlock_policy_rwsem_write(data->cpu);
1da177e4 1755 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
395913d0
MD
1756 lock_policy_rwsem_write(data->cpu);
1757 }
1da177e4
LT
1758
1759 /* start new governor */
1760 data->governor = policy->governor;
1761 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1762 /* new governor failed, so re-start old one */
e08f5f5b
GS
1763 dprintk("starting governor %s failed\n",
1764 data->governor->name);
1da177e4
LT
1765 if (old_gov) {
1766 data->governor = old_gov;
e08f5f5b
GS
1767 __cpufreq_governor(data,
1768 CPUFREQ_GOV_START);
1da177e4
LT
1769 }
1770 ret = -EINVAL;
1771 goto error_out;
1772 }
1773 /* might be a policy change, too, so fall through */
1774 }
1775 dprintk("governor: change or update limits\n");
1776 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1777 }
1778
7d5e350f 1779error_out:
1da177e4
LT
1780 cpufreq_debug_enable_ratelimit();
1781 return ret;
1782}
1783
1da177e4
LT
1784/**
1785 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1786 * @cpu: CPU which shall be re-evaluated
1787 *
1788 * Usefull for policy notifiers which have different necessities
1789 * at different times.
1790 */
1791int cpufreq_update_policy(unsigned int cpu)
1792{
1793 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1794 struct cpufreq_policy policy;
f1829e4a 1795 int ret;
1da177e4 1796
f1829e4a
JL
1797 if (!data) {
1798 ret = -ENODEV;
1799 goto no_policy;
1800 }
1da177e4 1801
f1829e4a
JL
1802 if (unlikely(lock_policy_rwsem_write(cpu))) {
1803 ret = -EINVAL;
1804 goto fail;
1805 }
1da177e4
LT
1806
1807 dprintk("updating policy for CPU %u\n", cpu);
7d5e350f 1808 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1809 policy.min = data->user_policy.min;
1810 policy.max = data->user_policy.max;
1811 policy.policy = data->user_policy.policy;
1812 policy.governor = data->user_policy.governor;
1813
0961dd0d
TR
1814 /* BIOS might change freq behind our back
1815 -> ask driver for current freq and notify governors about a change */
1816 if (cpufreq_driver->get) {
1817 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3
TR
1818 if (!data->cur) {
1819 dprintk("Driver did not initialize current freq");
1820 data->cur = policy.cur;
1821 } else {
1822 if (data->cur != policy.cur)
e08f5f5b
GS
1823 cpufreq_out_of_sync(cpu, data->cur,
1824 policy.cur);
a85f7bd3 1825 }
0961dd0d
TR
1826 }
1827
1da177e4
LT
1828 ret = __cpufreq_set_policy(data, &policy);
1829
5a01f2e8
VP
1830 unlock_policy_rwsem_write(cpu);
1831
f1829e4a 1832fail:
1da177e4 1833 cpufreq_cpu_put(data);
f1829e4a 1834no_policy:
1da177e4
LT
1835 return ret;
1836}
1837EXPORT_SYMBOL(cpufreq_update_policy);
1838
dd184a01 1839static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1840 unsigned long action, void *hcpu)
1841{
1842 unsigned int cpu = (unsigned long)hcpu;
c32b6b8e
AR
1843 struct sys_device *sys_dev;
1844
1845 sys_dev = get_cpu_sysdev(cpu);
c32b6b8e
AR
1846 if (sys_dev) {
1847 switch (action) {
1848 case CPU_ONLINE:
8bb78442 1849 case CPU_ONLINE_FROZEN:
c32b6b8e
AR
1850 cpufreq_add_dev(sys_dev);
1851 break;
1852 case CPU_DOWN_PREPARE:
8bb78442 1853 case CPU_DOWN_PREPARE_FROZEN:
5a01f2e8
VP
1854 if (unlikely(lock_policy_rwsem_write(cpu)))
1855 BUG();
1856
5a01f2e8 1857 __cpufreq_remove_dev(sys_dev);
c32b6b8e 1858 break;
5a01f2e8 1859 case CPU_DOWN_FAILED:
8bb78442 1860 case CPU_DOWN_FAILED_FROZEN:
5a01f2e8 1861 cpufreq_add_dev(sys_dev);
c32b6b8e
AR
1862 break;
1863 }
1864 }
1865 return NOTIFY_OK;
1866}
1867
f6ebef30 1868static struct notifier_block __refdata cpufreq_cpu_notifier =
c32b6b8e
AR
1869{
1870 .notifier_call = cpufreq_cpu_callback,
1871};
1da177e4
LT
1872
1873/*********************************************************************
1874 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1875 *********************************************************************/
1876
1877/**
1878 * cpufreq_register_driver - register a CPU Frequency driver
1879 * @driver_data: A struct cpufreq_driver containing the values#
1880 * submitted by the CPU Frequency driver.
1881 *
32ee8c3e 1882 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1883 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1884 * (and isn't unregistered in the meantime).
1da177e4
LT
1885 *
1886 */
221dee28 1887int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
1888{
1889 unsigned long flags;
1890 int ret;
1891
1892 if (!driver_data || !driver_data->verify || !driver_data->init ||
1893 ((!driver_data->setpolicy) && (!driver_data->target)))
1894 return -EINVAL;
1895
1896 dprintk("trying to register driver %s\n", driver_data->name);
1897
1898 if (driver_data->setpolicy)
1899 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1900
1901 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1902 if (cpufreq_driver) {
1903 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1904 return -EBUSY;
1905 }
1906 cpufreq_driver = driver_data;
1907 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1908
7a6aedfa
MT
1909 ret = sysdev_driver_register(&cpu_sysdev_class,
1910 &cpufreq_sysdev_driver);
1da177e4
LT
1911
1912 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1913 int i;
1914 ret = -ENODEV;
1915
1916 /* check for at least one working CPU */
7a6aedfa
MT
1917 for (i = 0; i < nr_cpu_ids; i++)
1918 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 1919 ret = 0;
7a6aedfa
MT
1920 break;
1921 }
1da177e4
LT
1922
1923 /* if all ->init() calls failed, unregister */
1924 if (ret) {
e08f5f5b
GS
1925 dprintk("no CPU initialized for driver %s\n",
1926 driver_data->name);
1927 sysdev_driver_unregister(&cpu_sysdev_class,
1928 &cpufreq_sysdev_driver);
1da177e4
LT
1929
1930 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1931 cpufreq_driver = NULL;
1932 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1933 }
1934 }
1935
1936 if (!ret) {
65edc68c 1937 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4
LT
1938 dprintk("driver %s up and running\n", driver_data->name);
1939 cpufreq_debug_enable_ratelimit();
1940 }
1941
4d34a67d 1942 return ret;
1da177e4
LT
1943}
1944EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1945
1946
1947/**
1948 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1949 *
32ee8c3e 1950 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
1951 * the right to do so, i.e. if you have succeeded in initialising before!
1952 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1953 * currently not initialised.
1954 */
221dee28 1955int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
1956{
1957 unsigned long flags;
1958
1959 cpufreq_debug_disable_ratelimit();
1960
1961 if (!cpufreq_driver || (driver != cpufreq_driver)) {
1962 cpufreq_debug_enable_ratelimit();
1963 return -EINVAL;
1964 }
1965
1966 dprintk("unregistering driver %s\n", driver->name);
1967
1968 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
65edc68c 1969 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4
LT
1970
1971 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1972 cpufreq_driver = NULL;
1973 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1974
1975 return 0;
1976}
1977EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1978
1979static int __init cpufreq_core_init(void)
1980{
1981 int cpu;
1982
1983 for_each_possible_cpu(cpu) {
1984 per_cpu(policy_cpu, cpu) = -1;
1985 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1986 }
8aa84ad8
TR
1987
1988 cpufreq_global_kobject = kobject_create_and_add("cpufreq",
1989 &cpu_sysdev_class.kset.kobj);
1990 BUG_ON(!cpufreq_global_kobject);
1991
5a01f2e8
VP
1992 return 0;
1993}
5a01f2e8 1994core_initcall(cpufreq_core_init);