[linux-2.6-block.git] / drivers / cpufreq / cpufreq.c
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 8 * Added handling for CPU hotplug
 9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 10 * Fix handling for CPU hotplug -- affected CPUs
 11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/notifier.h>
22#include <linux/cpufreq.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/spinlock.h>
26#include <linux/device.h>
27#include <linux/slab.h>
28#include <linux/cpu.h>
29#include <linux/completion.h>
 30#include <linux/mutex.h>
31
32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
33
34/**
 35 * The "cpufreq driver" - the arch- or hardware-dependent low
36 * level driver of CPUFreq support, and its spinlock. This lock
37 * also protects the cpufreq_cpu_data array.
38 */
39static struct cpufreq_driver *cpufreq_driver;
40static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
41static DEFINE_SPINLOCK(cpufreq_driver_lock);
42
43/* internal prototypes */
44static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
45static void handle_update(void *data);
46
47/**
48 * Two notifier lists: the "policy" list is involved in the
49 * validation process for a new CPU frequency policy; the
50 * "transition" list for kernel code that needs to handle
51 * changes to devices when the CPU clock speed changes.
52 * The mutex locks both lists.
53 */
54static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
55static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list);
56
57
58static LIST_HEAD(cpufreq_governor_list);
 59static DEFINE_MUTEX (cpufreq_governor_mutex);
 60
 61struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
62{
63 struct cpufreq_policy *data;
64 unsigned long flags;
65
66 if (cpu >= NR_CPUS)
67 goto err_out;
68
69 /* get the cpufreq driver */
70 spin_lock_irqsave(&cpufreq_driver_lock, flags);
71
72 if (!cpufreq_driver)
73 goto err_out_unlock;
74
75 if (!try_module_get(cpufreq_driver->owner))
76 goto err_out_unlock;
77
78
79 /* get the CPU */
80 data = cpufreq_cpu_data[cpu];
81
82 if (!data)
83 goto err_out_put_module;
84
85 if (!kobject_get(&data->kobj))
86 goto err_out_put_module;
87
 88 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
89 return data;
90
 91err_out_put_module:
 92 module_put(cpufreq_driver->owner);
 93err_out_unlock:
 94 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 95err_out:
96 return NULL;
97}
98EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
99
 100
101void cpufreq_cpu_put(struct cpufreq_policy *data)
102{
103 kobject_put(&data->kobj);
104 module_put(cpufreq_driver->owner);
105}
106EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
107
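/*
 * Example (illustrative sketch, not part of the cpufreq core): the
 * cpufreq_cpu_get()/cpufreq_cpu_put() reference pattern as used by callers
 * that only need to peek at a policy. The helper example_read_max() is a
 * hypothetical name introduced here for illustration.
 */
static unsigned int example_read_max(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int max;

        /* takes a driver module reference and a kobject reference */
        policy = cpufreq_cpu_get(cpu);
        if (!policy)
                return 0;

        max = policy->max;

        /* drop both references again */
        cpufreq_cpu_put(policy);
        return max;
}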
108
109/*********************************************************************
110 * UNIFIED DEBUG HELPERS *
111 *********************************************************************/
112#ifdef CONFIG_CPU_FREQ_DEBUG
113
114/* what part(s) of the CPUfreq subsystem are debugged? */
115static unsigned int debug;
116
117/* is the debug output ratelimit'ed using printk_ratelimit? User can
118 * set or modify this value.
119 */
120static unsigned int debug_ratelimit = 1;
121
122/* is the printk_ratelimit'ing enabled? It's enabled after a successful
123 * loading of a cpufreq driver, temporarily disabled when a new policy
124 * is set, and disabled upon cpufreq driver removal
125 */
126static unsigned int disable_ratelimit = 1;
127static DEFINE_SPINLOCK(disable_ratelimit_lock);
128
 129static void cpufreq_debug_enable_ratelimit(void)
130{
131 unsigned long flags;
132
133 spin_lock_irqsave(&disable_ratelimit_lock, flags);
134 if (disable_ratelimit)
135 disable_ratelimit--;
136 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
137}
138
 139static void cpufreq_debug_disable_ratelimit(void)
140{
141 unsigned long flags;
142
143 spin_lock_irqsave(&disable_ratelimit_lock, flags);
144 disable_ratelimit++;
145 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
146}
147
148void cpufreq_debug_printk(unsigned int type, const char *prefix, const char *fmt, ...)
149{
150 char s[256];
151 va_list args;
152 unsigned int len;
153 unsigned long flags;
 154
155 WARN_ON(!prefix);
156 if (type & debug) {
157 spin_lock_irqsave(&disable_ratelimit_lock, flags);
158 if (!disable_ratelimit && debug_ratelimit && !printk_ratelimit()) {
159 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
160 return;
161 }
162 spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
163
164 len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);
165
166 va_start(args, fmt);
167 len += vsnprintf(&s[len], (256 - len), fmt, args);
168 va_end(args);
169
170 printk(s);
171
172 WARN_ON(len < 5);
173 }
174}
175EXPORT_SYMBOL(cpufreq_debug_printk);
176
177
178module_param(debug, uint, 0644);
179MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core, 2 to debug drivers, and 4 to debug governors.");
180
181module_param(debug_ratelimit, uint, 0644);
182MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging: set to 0 to disable ratelimiting.");
183
184#else /* !CONFIG_CPU_FREQ_DEBUG */
185
186static inline void cpufreq_debug_enable_ratelimit(void) { return; }
187static inline void cpufreq_debug_disable_ratelimit(void) { return; }
188
189#endif /* CONFIG_CPU_FREQ_DEBUG */
190
191
192/*********************************************************************
193 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
194 *********************************************************************/
195
196/**
197 * adjust_jiffies - adjust the system "loops_per_jiffy"
198 *
199 * This function alters the system "loops_per_jiffy" for the clock
200 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 201 * systems as each CPU might be scaled differently. So, use the arch
202 * per-CPU loops_per_jiffy value wherever possible.
203 */
204#ifndef CONFIG_SMP
205static unsigned long l_p_j_ref;
206static unsigned int l_p_j_ref_freq;
207
 208static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
209{
210 if (ci->flags & CPUFREQ_CONST_LOOPS)
211 return;
212
213 if (!l_p_j_ref_freq) {
214 l_p_j_ref = loops_per_jiffy;
215 l_p_j_ref_freq = ci->old;
216 dprintk("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
217 }
218 if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
219 (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
 220 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
221 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new);
222 dprintk("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new);
223 }
224}
225#else
226static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { return; }
227#endif
228
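/*
 * Worked example (illustrative, not part of the cpufreq core): the scaling
 * above is linear in frequency, i.e. roughly
 *        new_lpj = l_p_j_ref * ci->new / l_p_j_ref_freq.
 * Assuming l_p_j_ref = 4,000,000 was saved at 800,000 kHz, a transition to
 * 1,600,000 kHz gives loops_per_jiffy = 4,000,000 * 1600000 / 800000 =
 * 8,000,000, so udelay()-style busy loops keep roughly the same wall-clock
 * length after the frequency change.
 */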
229
230/**
231 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
232 * on frequency transition.
 233 *
234 * This function calls the transition notifiers and the "adjust_jiffies"
235 * function. It is called twice on all CPU frequency changes that have
 236 * external effects.
237 */
238void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
239{
240 struct cpufreq_policy *policy;
241
242 BUG_ON(irqs_disabled());
243
244 freqs->flags = cpufreq_driver->flags;
245 dprintk("notification %u of frequency transition to %u kHz\n",
246 state, freqs->new);
 247
 248 policy = cpufreq_cpu_data[freqs->cpu];
 249 switch (state) {
 250
 251 case CPUFREQ_PRECHANGE:
 252 /* detect if the driver reported a value as "old frequency"
 253 * which is not equal to what the cpufreq core thinks is
 254 * "old frequency".
255 */
256 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
257 if ((policy) && (policy->cpu == freqs->cpu) &&
258 (policy->cur) && (policy->cur != freqs->old)) {
 259 dprintk("Warning: CPU frequency is"
260 " %u, cpufreq assumed %u kHz.\n",
261 freqs->old, policy->cur);
262 freqs->old = policy->cur;
263 }
264 }
265 blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
266 CPUFREQ_PRECHANGE, freqs);
267 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
268 break;
 269
270 case CPUFREQ_POSTCHANGE:
271 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
272 blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
273 CPUFREQ_POSTCHANGE, freqs);
274 if (likely(policy) && likely(policy->cpu == freqs->cpu))
275 policy->cur = freqs->new;
276 break;
277 }
278}
279EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
280
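/*
 * Example (illustrative sketch, not part of the cpufreq core): a scaling
 * driver's ->target() callback typically brackets the hardware programming
 * with the two notifications handled above. The function name and the
 * "program the hardware" step are hypothetical placeholders.
 */
static int example_target(struct cpufreq_policy *policy,
                          unsigned int target_freq, unsigned int relation)
{
        struct cpufreq_freqs freqs;

        freqs.cpu = policy->cpu;
        freqs.old = policy->cur;
        freqs.new = target_freq;

        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        /* ... program the new frequency into the hardware here ... */
        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

        return 0;
}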
281
282
283/*********************************************************************
284 * SYSFS INTERFACE *
285 *********************************************************************/
286
287/**
288 * cpufreq_parse_governor - parse a governor string
289 */
290static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
291 struct cpufreq_governor **governor)
292{
293 if (!cpufreq_driver)
294 return -EINVAL;
295 if (cpufreq_driver->setpolicy) {
296 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
297 *policy = CPUFREQ_POLICY_PERFORMANCE;
298 return 0;
299 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
300 *policy = CPUFREQ_POLICY_POWERSAVE;
301 return 0;
302 }
303 return -EINVAL;
304 } else {
305 struct cpufreq_governor *t;
 306 mutex_lock(&cpufreq_governor_mutex);
307 if (!cpufreq_driver || !cpufreq_driver->target)
308 goto out;
309 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
310 if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
311 *governor = t;
 312 mutex_unlock(&cpufreq_governor_mutex);
313 return 0;
314 }
315 }
 316out:
 317 mutex_unlock(&cpufreq_governor_mutex);
318 }
319 return -EINVAL;
320}
321
322
323/* drivers/base/cpu.c */
324extern struct sysdev_class cpu_sysdev_class;
325
326
327/**
328 * cpufreq_per_cpu_attr_read() / show_##file_name() - print out cpufreq information
329 *
330 * Write out information from cpufreq_driver->policy[cpu]; object must be
331 * "unsigned int".
332 */
333
334#define show_one(file_name, object) \
335static ssize_t show_##file_name \
336(struct cpufreq_policy * policy, char *buf) \
337{ \
338 return sprintf (buf, "%u\n", policy->object); \
339}
340
341show_one(cpuinfo_min_freq, cpuinfo.min_freq);
342show_one(cpuinfo_max_freq, cpuinfo.max_freq);
343show_one(scaling_min_freq, min);
344show_one(scaling_max_freq, max);
345show_one(scaling_cur_freq, cur);
346
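/*
 * For reference (illustration only, not part of the cpufreq core): the
 * show_one() macro above expands show_one(scaling_min_freq, min) into
 * roughly the following sysfs read handler.
 */
#if 0   /* expansion shown for illustration only */
static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", policy->min);
}
#endif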
347static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy);
348
349/**
350 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
351 */
352#define store_one(file_name, object) \
353static ssize_t store_##file_name \
354(struct cpufreq_policy * policy, const char *buf, size_t count) \
355{ \
356 unsigned int ret = -EINVAL; \
357 struct cpufreq_policy new_policy; \
358 \
359 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
360 if (ret) \
361 return -EINVAL; \
362 \
363 ret = sscanf (buf, "%u", &new_policy.object); \
364 if (ret != 1) \
365 return -EINVAL; \
366 \
 367 lock_cpu_hotplug(); \
368 mutex_lock(&policy->lock); \
369 ret = __cpufreq_set_policy(policy, &new_policy); \
370 policy->user_policy.object = policy->object; \
371 mutex_unlock(&policy->lock); \
 372 unlock_cpu_hotplug(); \
373 \
374 return ret ? ret : count; \
375}
376
377store_one(scaling_min_freq,min);
378store_one(scaling_max_freq,max);
379
380/**
381 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
382 */
383static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, char *buf)
384{
385 unsigned int cur_freq = cpufreq_get(policy->cpu);
386 if (!cur_freq)
387 return sprintf(buf, "<unknown>");
388 return sprintf(buf, "%u\n", cur_freq);
389}
390
391
392/**
393 * show_scaling_governor - show the current policy for the specified CPU
394 */
395static ssize_t show_scaling_governor (struct cpufreq_policy * policy, char *buf)
396{
397 if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
398 return sprintf(buf, "powersave\n");
399 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
400 return sprintf(buf, "performance\n");
401 else if (policy->governor)
402 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", policy->governor->name);
403 return -EINVAL;
404}
405
406
407/**
408 * store_scaling_governor - store policy for the specified CPU
409 */
410static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
411 const char *buf, size_t count)
412{
413 unsigned int ret = -EINVAL;
414 char str_governor[16];
415 struct cpufreq_policy new_policy;
416
417 ret = cpufreq_get_policy(&new_policy, policy->cpu);
418 if (ret)
419 return ret;
420
421 ret = sscanf (buf, "%15s", str_governor);
422 if (ret != 1)
423 return -EINVAL;
424
425 if (cpufreq_parse_governor(str_governor, &new_policy.policy, &new_policy.governor))
426 return -EINVAL;
427
428 lock_cpu_hotplug();
429
430 /* Do not use cpufreq_set_policy here or the user_policy.max
431 will be wrongly overridden */
432 mutex_lock(&policy->lock);
433 ret = __cpufreq_set_policy(policy, &new_policy);
434
435 policy->user_policy.policy = policy->policy;
436 policy->user_policy.governor = policy->governor;
437 mutex_unlock(&policy->lock);
438
439 unlock_cpu_hotplug();
440
441 return ret ? ret : count;
442}
443
444/**
445 * show_scaling_driver - show the cpufreq driver currently loaded
446 */
447static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
448{
449 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
450}
451
452/**
453 * show_scaling_available_governors - show the available CPUfreq governors
454 */
455static ssize_t show_scaling_available_governors (struct cpufreq_policy * policy,
456 char *buf)
457{
458 ssize_t i = 0;
459 struct cpufreq_governor *t;
460
461 if (!cpufreq_driver->target) {
462 i += sprintf(buf, "performance powersave");
463 goto out;
464 }
465
466 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
467 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2)))
468 goto out;
469 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
470 }
 471out:
472 i += sprintf(&buf[i], "\n");
473 return i;
474}
475/**
476 * show_affected_cpus - show the CPUs affected by each transition
477 */
478static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
479{
480 ssize_t i = 0;
481 unsigned int cpu;
482
483 for_each_cpu_mask(cpu, policy->cpus) {
484 if (i)
485 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
486 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
487 if (i >= (PAGE_SIZE - 5))
488 break;
489 }
490 i += sprintf(&buf[i], "\n");
491 return i;
492}
493
494
495#define define_one_ro(_name) \
496static struct freq_attr _name = \
497__ATTR(_name, 0444, show_##_name, NULL)
498
499#define define_one_ro0400(_name) \
500static struct freq_attr _name = \
501__ATTR(_name, 0400, show_##_name, NULL)
502
503#define define_one_rw(_name) \
504static struct freq_attr _name = \
505__ATTR(_name, 0644, show_##_name, store_##_name)
506
507define_one_ro0400(cpuinfo_cur_freq);
508define_one_ro(cpuinfo_min_freq);
509define_one_ro(cpuinfo_max_freq);
510define_one_ro(scaling_available_governors);
511define_one_ro(scaling_driver);
512define_one_ro(scaling_cur_freq);
513define_one_ro(affected_cpus);
514define_one_rw(scaling_min_freq);
515define_one_rw(scaling_max_freq);
516define_one_rw(scaling_governor);
517
518static struct attribute * default_attrs[] = {
519 &cpuinfo_min_freq.attr,
520 &cpuinfo_max_freq.attr,
521 &scaling_min_freq.attr,
522 &scaling_max_freq.attr,
523 &affected_cpus.attr,
524 &scaling_governor.attr,
525 &scaling_driver.attr,
526 &scaling_available_governors.attr,
527 NULL
528};
529
530#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
531#define to_attr(a) container_of(a,struct freq_attr,attr)
532
533static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
534{
535 struct cpufreq_policy * policy = to_policy(kobj);
536 struct freq_attr * fattr = to_attr(attr);
537 ssize_t ret;
538 policy = cpufreq_cpu_get(policy->cpu);
539 if (!policy)
540 return -EINVAL;
 541 ret = fattr->show ? fattr->show(policy,buf) : -EIO;
542 cpufreq_cpu_put(policy);
543 return ret;
544}
545
 546static ssize_t store(struct kobject * kobj, struct attribute * attr,
547 const char * buf, size_t count)
548{
549 struct cpufreq_policy * policy = to_policy(kobj);
550 struct freq_attr * fattr = to_attr(attr);
551 ssize_t ret;
552 policy = cpufreq_cpu_get(policy->cpu);
553 if (!policy)
554 return -EINVAL;
 555 ret = fattr->store ? fattr->store(policy,buf,count) : -EIO;
556 cpufreq_cpu_put(policy);
557 return ret;
558}
559
560static void cpufreq_sysfs_release(struct kobject * kobj)
561{
562 struct cpufreq_policy * policy = to_policy(kobj);
563 dprintk("last reference is dropped\n");
564 complete(&policy->kobj_unregister);
565}
566
567static struct sysfs_ops sysfs_ops = {
568 .show = show,
569 .store = store,
570};
571
572static struct kobj_type ktype_cpufreq = {
573 .sysfs_ops = &sysfs_ops,
574 .default_attrs = default_attrs,
575 .release = cpufreq_sysfs_release,
576};
577
578
579/**
580 * cpufreq_add_dev - add a CPU device
581 *
 582 * Adds the cpufreq interface for a CPU device.
583 */
584static int cpufreq_add_dev (struct sys_device * sys_dev)
585{
586 unsigned int cpu = sys_dev->id;
587 int ret = 0;
588 struct cpufreq_policy new_policy;
589 struct cpufreq_policy *policy;
590 struct freq_attr **drv_attr;
 591 struct sys_device *cpu_sys_dev;
592 unsigned long flags;
593 unsigned int j;
594#ifdef CONFIG_SMP
595 struct cpufreq_policy *managed_policy;
596#endif
 597
598 if (cpu_is_offline(cpu))
599 return 0;
600
601 cpufreq_debug_disable_ratelimit();
602 dprintk("adding CPU %u\n", cpu);
603
604#ifdef CONFIG_SMP
605 /* check whether a different CPU already registered this
606 * CPU because it is in the same boat. */
607 policy = cpufreq_cpu_get(cpu);
608 if (unlikely(policy)) {
 609 cpufreq_cpu_put(policy);
610 cpufreq_debug_enable_ratelimit();
611 return 0;
612 }
613#endif
614
615 if (!try_module_get(cpufreq_driver->owner)) {
616 ret = -EINVAL;
617 goto module_out;
618 }
619
 620 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
621 if (!policy) {
622 ret = -ENOMEM;
623 goto nomem_out;
624 }
625
626 policy->cpu = cpu;
627 policy->cpus = cpumask_of_cpu(cpu);
628
629 mutex_init(&policy->lock);
630 mutex_lock(&policy->lock);
631 init_completion(&policy->kobj_unregister);
632 INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
633
634 /* call driver. From then on the cpufreq must be able
635 * to accept all calls to ->verify and ->setpolicy for this CPU
636 */
637 ret = cpufreq_driver->init(policy);
638 if (ret) {
639 dprintk("initialization failed\n");
 640 mutex_unlock(&policy->lock);
641 goto err_out;
642 }
643
644#ifdef CONFIG_SMP
645 for_each_cpu_mask(j, policy->cpus) {
646 if (cpu == j)
647 continue;
648
649 /* check for existing affected CPUs. They may not be aware
650 * of it due to CPU Hotplug.
651 */
652 managed_policy = cpufreq_cpu_get(j);
653 if (unlikely(managed_policy)) {
654 spin_lock_irqsave(&cpufreq_driver_lock, flags);
655 managed_policy->cpus = policy->cpus;
656 cpufreq_cpu_data[cpu] = managed_policy;
657 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
658
659 dprintk("CPU already managed, adding link\n");
660 sysfs_create_link(&sys_dev->kobj,
661 &managed_policy->kobj, "cpufreq");
662
663 cpufreq_debug_enable_ratelimit();
664 mutex_unlock(&policy->lock);
665 ret = 0;
666 goto err_out_driver_exit; /* call driver->exit() */
667 }
668 }
669#endif
670 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
671
672 /* prepare interface data */
673 policy->kobj.parent = &sys_dev->kobj;
674 policy->kobj.ktype = &ktype_cpufreq;
675 strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
676
677 ret = kobject_register(&policy->kobj);
678 if (ret) {
679 mutex_unlock(&policy->lock);
 680 goto err_out_driver_exit;
 681 }
682 /* set up files for this cpu device */
683 drv_attr = cpufreq_driver->attr;
684 while ((drv_attr) && (*drv_attr)) {
685 sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
686 drv_attr++;
687 }
688 if (cpufreq_driver->get)
689 sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
690 if (cpufreq_driver->target)
691 sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
692
693 spin_lock_irqsave(&cpufreq_driver_lock, flags);
694 for_each_cpu_mask(j, policy->cpus)
695 cpufreq_cpu_data[j] = policy;
696 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
697
698 /* symlink affected CPUs */
699 for_each_cpu_mask(j, policy->cpus) {
700 if (j == cpu)
701 continue;
702 if (!cpu_online(j))
703 continue;
704
 705 dprintk("CPU %u already managed, adding link\n", j);
706 cpufreq_cpu_get(cpu);
707 cpu_sys_dev = get_cpu_sysdev(j);
708 sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
709 "cpufreq");
710 }
711
712 policy->governor = NULL; /* to assure that the starting sequence is
713 * run in cpufreq_set_policy */
 714 mutex_unlock(&policy->lock);
 715
 716 /* set default policy */
717 ret = cpufreq_set_policy(&new_policy);
718 if (ret) {
719 dprintk("setting policy failed\n");
720 goto err_out_unregister;
721 }
722
723 module_put(cpufreq_driver->owner);
724 dprintk("initialization complete\n");
725 cpufreq_debug_enable_ratelimit();
 726
727 return 0;
728
729
730err_out_unregister:
731 spin_lock_irqsave(&cpufreq_driver_lock, flags);
732 for_each_cpu_mask(j, policy->cpus)
733 cpufreq_cpu_data[j] = NULL;
734 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
735
736 kobject_unregister(&policy->kobj);
737 wait_for_completion(&policy->kobj_unregister);
738
739err_out_driver_exit:
740 if (cpufreq_driver->exit)
741 cpufreq_driver->exit(policy);
742
743err_out:
744 kfree(policy);
745
746nomem_out:
747 module_put(cpufreq_driver->owner);
 748module_out:
749 cpufreq_debug_enable_ratelimit();
750 return ret;
751}
752
753
754/**
755 * cpufreq_remove_dev - remove a CPU device
756 *
757 * Removes the cpufreq interface for a CPU device.
758 */
759static int cpufreq_remove_dev (struct sys_device * sys_dev)
760{
761 unsigned int cpu = sys_dev->id;
762 unsigned long flags;
763 struct cpufreq_policy *data;
764#ifdef CONFIG_SMP
 765 struct sys_device *cpu_sys_dev;
766 unsigned int j;
767#endif
768
769 cpufreq_debug_disable_ratelimit();
770 dprintk("unregistering CPU %u\n", cpu);
771
772 spin_lock_irqsave(&cpufreq_driver_lock, flags);
773 data = cpufreq_cpu_data[cpu];
774
775 if (!data) {
776 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
777 cpufreq_debug_enable_ratelimit();
778 return -EINVAL;
779 }
780 cpufreq_cpu_data[cpu] = NULL;
781
782
783#ifdef CONFIG_SMP
784 /* if this isn't the CPU which is the parent of the kobj, we
 785 * only need to unlink, put and exit
786 */
787 if (unlikely(cpu != data->cpu)) {
788 dprintk("removing link\n");
 789 cpu_clear(cpu, data->cpus);
790 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
791 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
792 cpufreq_cpu_put(data);
793 cpufreq_debug_enable_ratelimit();
794 return 0;
795 }
796#endif
797
798
799 if (!kobject_get(&data->kobj)) {
800 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
801 cpufreq_debug_enable_ratelimit();
 802 return -EFAULT;
803 }
804
805#ifdef CONFIG_SMP
806 /* if we have other CPUs still registered, we need to unlink them,
807 * or else wait_for_completion below will lock up. Clean the
808 * cpufreq_cpu_data[] while holding the lock, and remove the sysfs
809 * links afterwards.
810 */
811 if (unlikely(cpus_weight(data->cpus) > 1)) {
812 for_each_cpu_mask(j, data->cpus) {
813 if (j == cpu)
814 continue;
815 cpufreq_cpu_data[j] = NULL;
816 }
817 }
818
819 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
820
821 if (unlikely(cpus_weight(data->cpus) > 1)) {
822 for_each_cpu_mask(j, data->cpus) {
823 if (j == cpu)
824 continue;
825 dprintk("removing link for cpu %u\n", j);
826 cpu_sys_dev = get_cpu_sysdev(j);
827 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
828 cpufreq_cpu_put(data);
829 }
830 }
831#else
832 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
833#endif
834
 835 mutex_lock(&data->lock);
836 if (cpufreq_driver->target)
837 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 838 mutex_unlock(&data->lock);
839
840 kobject_unregister(&data->kobj);
841
842 kobject_put(&data->kobj);
843
844 /* we need to make sure that the underlying kobj is actually
 845 * not referenced anymore by anybody before we proceed with
846 * unloading.
847 */
848 dprintk("waiting for dropping of refcount\n");
849 wait_for_completion(&data->kobj_unregister);
850 dprintk("wait complete\n");
851
852 if (cpufreq_driver->exit)
853 cpufreq_driver->exit(data);
854
855 kfree(data);
856
857 cpufreq_debug_enable_ratelimit();
858 return 0;
859}
860
861
862static void handle_update(void *data)
863{
864 unsigned int cpu = (unsigned int)(long)data;
865 dprintk("handle_update for cpu %u called\n", cpu);
866 cpufreq_update_policy(cpu);
867}
868
869/**
870 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
871 * @cpu: cpu number
872 * @old_freq: CPU frequency the kernel thinks the CPU runs at
873 * @new_freq: CPU frequency the CPU actually runs at
874 *
875 * We adjust to current frequency first, and need to clean up later. So either call
876 * to cpufreq_update_policy() or schedule handle_update()).
877 */
878static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigned int new_freq)
879{
880 struct cpufreq_freqs freqs;
881
 882 dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
883 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
884
885 freqs.cpu = cpu;
886 freqs.old = old_freq;
887 freqs.new = new_freq;
888 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
889 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
890}
891
892
 893/**
 894 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
895 * @cpu: CPU number
896 *
897 * This is the last known freq, without actually getting it from the driver.
898 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
899 */
900unsigned int cpufreq_quick_get(unsigned int cpu)
901{
902 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
903 unsigned int ret = 0;
904
905 if (policy) {
 906 mutex_lock(&policy->lock);
 907 ret = policy->cur;
 908 mutex_unlock(&policy->lock);
909 cpufreq_cpu_put(policy);
910 }
911
912 return (ret);
913}
914EXPORT_SYMBOL(cpufreq_quick_get);
915
916
 917/**
918 * cpufreq_get - get the current CPU frequency (in kHz)
919 * @cpu: CPU number
920 *
921 * Get the CPU current (static) CPU frequency
922 */
923unsigned int cpufreq_get(unsigned int cpu)
924{
925 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
926 unsigned int ret = 0;
927
928 if (!policy)
929 return 0;
930
931 if (!cpufreq_driver->get)
932 goto out;
933
 934 mutex_lock(&policy->lock);
935
936 ret = cpufreq_driver->get(cpu);
937
 938 if (ret && policy->cur && !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
939 /* verify no discrepancy between actual and saved value exists */
940 if (unlikely(ret != policy->cur)) {
941 cpufreq_out_of_sync(cpu, policy->cur, ret);
942 schedule_work(&policy->update);
943 }
944 }
945
 946 mutex_unlock(&policy->lock);
 947
 948out:
949 cpufreq_cpu_put(policy);
950
951 return (ret);
952}
953EXPORT_SYMBOL(cpufreq_get);
954
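/*
 * Example (illustrative sketch, not part of the cpufreq core): the
 * difference between the two getters, assuming a caller that merely wants
 * to log frequencies. cpufreq_quick_get() returns the cached policy->cur,
 * while cpufreq_get() may query the hardware via cpufreq_driver->get().
 * example_log_freqs() is a hypothetical name.
 */
static void example_log_freqs(unsigned int cpu)
{
        unsigned int cached = cpufreq_quick_get(cpu);   /* last known value */
        unsigned int actual = cpufreq_get(cpu);         /* asks the driver */

        dprintk("cpu %u: cached %u kHz, driver reports %u kHz\n",
                cpu, cached, actual);
}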
955
956/**
957 * cpufreq_suspend - let the low level driver prepare for suspend
958 */
959
 960static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
961{
962 int cpu = sysdev->id;
963 unsigned int ret = 0;
964 unsigned int cur_freq = 0;
965 struct cpufreq_policy *cpu_policy;
966
 967 dprintk("suspending cpu %u\n", cpu);
968
969 if (!cpu_online(cpu))
970 return 0;
971
972 /* we may be lax here as interrupts are off. Nonetheless
973 * we need to grab the correct cpu policy, as to check
974 * whether we really run on this CPU.
975 */
976
977 cpu_policy = cpufreq_cpu_get(cpu);
978 if (!cpu_policy)
979 return -EINVAL;
980
981 /* only handle each CPU group once */
982 if (unlikely(cpu_policy->cpu != cpu)) {
983 cpufreq_cpu_put(cpu_policy);
984 return 0;
985 }
986
987 if (cpufreq_driver->suspend) {
 988 ret = cpufreq_driver->suspend(cpu_policy, pmsg);
989 if (ret) {
990 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
991 "step on CPU %u\n", cpu_policy->cpu);
992 cpufreq_cpu_put(cpu_policy);
993 return ret;
994 }
995 }
996
997
998 if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
999 goto out;
1000
1001 if (cpufreq_driver->get)
1002 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1003
1004 if (!cur_freq || !cpu_policy->cur) {
1005 printk(KERN_ERR "cpufreq: suspend failed to assert current "
1006 "frequency is what timing core thinks it is.\n");
1007 goto out;
1008 }
1009
1010 if (unlikely(cur_freq != cpu_policy->cur)) {
1011 struct cpufreq_freqs freqs;
1012
1013 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
 1014 dprintk("Warning: CPU frequency is %u, "
1015 "cpufreq assumed %u kHz.\n",
1016 cur_freq, cpu_policy->cur);
1017
1018 freqs.cpu = cpu;
1019 freqs.old = cpu_policy->cur;
1020 freqs.new = cur_freq;
1021
 1022 blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
1023 CPUFREQ_SUSPENDCHANGE, &freqs);
1024 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
1025
1026 cpu_policy->cur = cur_freq;
1027 }
1028
 1029out:
1030 cpufreq_cpu_put(cpu_policy);
1031 return 0;
1032}
1033
1034/**
1035 * cpufreq_resume - restore proper CPU frequency handling after resume
1036 *
1037 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1038 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
1039 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
1040 * restored.
1041 */
1042static int cpufreq_resume(struct sys_device * sysdev)
1043{
1044 int cpu = sysdev->id;
1045 unsigned int ret = 0;
1046 struct cpufreq_policy *cpu_policy;
1047
1048 dprintk("resuming cpu %u\n", cpu);
1049
1050 if (!cpu_online(cpu))
1051 return 0;
1052
1053 /* we may be lax here as interrupts are off. Nonetheless
1054 * we need to grab the correct cpu policy, as to check
1055 * whether we really run on this CPU.
1056 */
1057
1058 cpu_policy = cpufreq_cpu_get(cpu);
1059 if (!cpu_policy)
1060 return -EINVAL;
1061
1062 /* only handle each CPU group once */
1063 if (unlikely(cpu_policy->cpu != cpu)) {
1064 cpufreq_cpu_put(cpu_policy);
1065 return 0;
1066 }
1067
1068 if (cpufreq_driver->resume) {
1069 ret = cpufreq_driver->resume(cpu_policy);
1070 if (ret) {
1071 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1072 "step on CPU %u\n", cpu_policy->cpu);
1073 cpufreq_cpu_put(cpu_policy);
1074 return ret;
1075 }
1076 }
1077
1078 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1079 unsigned int cur_freq = 0;
1080
1081 if (cpufreq_driver->get)
1082 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1083
1084 if (!cur_freq || !cpu_policy->cur) {
1085 printk(KERN_ERR "cpufreq: resume failed to assert "
1086 "current frequency is what timing core "
1087 "thinks it is.\n");
1088 goto out;
1089 }
1090
1091 if (unlikely(cur_freq != cpu_policy->cur)) {
1092 struct cpufreq_freqs freqs;
1093
 1094 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
 1095 dprintk("Warning: CPU frequency "
 1096 "is %u, cpufreq assumed %u kHz.\n",
1097 cur_freq, cpu_policy->cur);
1098
1099 freqs.cpu = cpu;
1100 freqs.old = cpu_policy->cur;
1101 freqs.new = cur_freq;
1102
1103 blocking_notifier_call_chain(
1104 &cpufreq_transition_notifier_list,
 1105 CPUFREQ_RESUMECHANGE, &freqs);
1106 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
1107
1108 cpu_policy->cur = cur_freq;
1109 }
1110 }
1111
1112out:
1113 schedule_work(&cpu_policy->update);
1114 cpufreq_cpu_put(cpu_policy);
1115 return ret;
1116}
1117
1118static struct sysdev_driver cpufreq_sysdev_driver = {
1119 .add = cpufreq_add_dev,
1120 .remove = cpufreq_remove_dev,
 1121 .suspend = cpufreq_suspend,
1122 .resume = cpufreq_resume,
1123};
1124
1125
1126/*********************************************************************
1127 * NOTIFIER LISTS INTERFACE *
1128 *********************************************************************/
1129
1130/**
1131 * cpufreq_register_notifier - register a driver with cpufreq
1132 * @nb: notifier function to register
1133 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1134 *
 1135 * Add a driver to one of two lists: either a list of drivers that
1136 * are notified about clock rate changes (once before and once after
1137 * the transition), or a list of drivers that are notified about
1138 * changes in cpufreq policy.
1139 *
1140 * This function may sleep, and has the same return conditions as
 1141 * blocking_notifier_chain_register.
1142 */
1143int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1144{
1145 int ret;
1146
1147 switch (list) {
1148 case CPUFREQ_TRANSITION_NOTIFIER:
1149 ret = blocking_notifier_chain_register(
1150 &cpufreq_transition_notifier_list, nb);
1151 break;
1152 case CPUFREQ_POLICY_NOTIFIER:
1153 ret = blocking_notifier_chain_register(
1154 &cpufreq_policy_notifier_list, nb);
1155 break;
1156 default:
1157 ret = -EINVAL;
1158 }
1159
1160 return ret;
1161}
1162EXPORT_SYMBOL(cpufreq_register_notifier);
1163
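/*
 * Example (illustrative sketch, not part of the cpufreq core): a minimal
 * transition notifier as a client of the interface above. The callback
 * receives a struct cpufreq_freqs pointer; names prefixed example_ are
 * hypothetical.
 */
static int example_transition(struct notifier_block *nb,
                              unsigned long val, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (val == CPUFREQ_POSTCHANGE)
                dprintk("cpu %u now runs at %u kHz\n",
                        freqs->cpu, freqs->new);
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_transition,
};

/* registration, e.g. from a module init function:
 *      cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */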
1164
1165/**
1166 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1167 * @nb: notifier block to be unregistered
1168 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1169 *
1170 * Remove a driver from the CPU frequency notifier list.
1171 *
1172 * This function may sleep, and has the same return conditions as
 1173 * blocking_notifier_chain_unregister.
1174 */
1175int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1176{
1177 int ret;
1178
1179 switch (list) {
1180 case CPUFREQ_TRANSITION_NOTIFIER:
1181 ret = blocking_notifier_chain_unregister(
1182 &cpufreq_transition_notifier_list, nb);
1183 break;
1184 case CPUFREQ_POLICY_NOTIFIER:
1185 ret = blocking_notifier_chain_unregister(
1186 &cpufreq_policy_notifier_list, nb);
1187 break;
1188 default:
1189 ret = -EINVAL;
1190 }
1191
1192 return ret;
1193}
1194EXPORT_SYMBOL(cpufreq_unregister_notifier);
1195
1196
1197/*********************************************************************
1198 * GOVERNORS *
1199 *********************************************************************/
1200
1201
 1202/* Must be called with lock_cpu_hotplug held */
1203int __cpufreq_driver_target(struct cpufreq_policy *policy,
1204 unsigned int target_freq,
1205 unsigned int relation)
1206{
1207 int retval = -EINVAL;
 1208
1209 dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1210 target_freq, relation);
1211 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1212 retval = cpufreq_driver->target(policy, target_freq, relation);
 1213
1214 return retval;
1215}
1216EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1217
1218int cpufreq_driver_target(struct cpufreq_policy *policy,
1219 unsigned int target_freq,
1220 unsigned int relation)
1221{
 1222 int ret;
1223
1224 policy = cpufreq_cpu_get(policy->cpu);
1225 if (!policy)
1226 return -EINVAL;
1227
 1228 lock_cpu_hotplug();
 1229 mutex_lock(&policy->lock);
1230
1231 ret = __cpufreq_driver_target(policy, target_freq, relation);
1232
 1233 mutex_unlock(&policy->lock);
 1234 unlock_cpu_hotplug();
1235
1236 cpufreq_cpu_put(policy);
1237 return ret;
1238}
1239EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1240
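/*
 * Example (illustrative sketch, not part of the cpufreq core): how a
 * frequency request is phrased. Governors, which already run under the
 * policy lock and the CPU hotplug lock on the CPUFREQ_GOV_LIMITS path, use
 * the __cpufreq_driver_target() form; other callers use
 * cpufreq_driver_target(), which takes the locks itself.
 */
#if 0   /* illustration only; 1000000 kHz is an arbitrary example value */
        /* lowest frequency at or above 1 GHz */
        __cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_L);

        /* highest frequency at or below the policy maximum */
        __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
#endif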
1241/*
1242 * Locking: Must be called with the lock_cpu_hotplug() lock held
1243 * when "event" is CPUFREQ_GOV_LIMITS
1244 */
1245
1246static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1247{
 1248 int ret;
1249
1250 if (!try_module_get(policy->governor->owner))
1251 return -EINVAL;
1252
1253 dprintk("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event);
1254 ret = policy->governor->governor(policy, event);
1255
1256 /* we keep one module reference alive for each CPU governed by this CPU */
1257 if ((event != CPUFREQ_GOV_START) || ret)
1258 module_put(policy->governor->owner);
1259 if ((event == CPUFREQ_GOV_STOP) && !ret)
1260 module_put(policy->governor->owner);
1261
1262 return ret;
1263}
1264
1265
1266int cpufreq_register_governor(struct cpufreq_governor *governor)
1267{
1268 struct cpufreq_governor *t;
1269
1270 if (!governor)
1271 return -EINVAL;
1272
 1273 mutex_lock(&cpufreq_governor_mutex);
 1274
1275 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
1276 if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
 1277 mutex_unlock(&cpufreq_governor_mutex);
1278 return -EBUSY;
1279 }
1280 }
1281 list_add(&governor->governor_list, &cpufreq_governor_list);
1282
 1283 mutex_unlock(&cpufreq_governor_mutex);
1284 return 0;
1285}
1286EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1287
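/*
 * Example (illustrative sketch, not part of the cpufreq core): skeleton of
 * a governor as accepted by cpufreq_register_governor() in this kernel
 * generation. The callback and all example_* names are hypothetical; a
 * real governor reacts to every CPUFREQ_GOV_* event it cares about.
 */
static int example_governor_fn(struct cpufreq_policy *policy,
                               unsigned int event)
{
        switch (event) {
        case CPUFREQ_GOV_START:
        case CPUFREQ_GOV_LIMITS:
                /* simplest possible behaviour: pin to the lower limit */
                __cpufreq_driver_target(policy, policy->min,
                                        CPUFREQ_RELATION_L);
                break;
        case CPUFREQ_GOV_STOP:
                break;
        }
        return 0;
}

static struct cpufreq_governor example_governor = {
        .name           = "example",
        .governor       = example_governor_fn,
        .owner          = THIS_MODULE,
};
/* registered via cpufreq_register_governor(&example_governor); */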
1288
1289void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1290{
1291 if (!governor)
1292 return;
1293
 1294 mutex_lock(&cpufreq_governor_mutex);
 1295 list_del(&governor->governor_list);
 1296 mutex_unlock(&cpufreq_governor_mutex);
1297 return;
1298}
1299EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1300
1301
1302
1303/*********************************************************************
1304 * POLICY INTERFACE *
1305 *********************************************************************/
1306
1307/**
1308 * cpufreq_get_policy - get the current cpufreq_policy
1309 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
1310 *
1311 * Reads the current cpufreq policy.
1312 */
1313int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1314{
1315 struct cpufreq_policy *cpu_policy;
1316 if (!policy)
1317 return -EINVAL;
1318
1319 cpu_policy = cpufreq_cpu_get(cpu);
1320 if (!cpu_policy)
1321 return -EINVAL;
1322
 1323 mutex_lock(&cpu_policy->lock);
 1324 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
 1325 mutex_unlock(&cpu_policy->lock);
1326
1327 cpufreq_cpu_put(cpu_policy);
1328 return 0;
1329}
1330EXPORT_SYMBOL(cpufreq_get_policy);
1331
1332
1333/*
1334 * Locking: Must be called with the lock_cpu_hotplug() lock held
1335 */
1336static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
1337{
1338 int ret = 0;
1339
1340 cpufreq_debug_disable_ratelimit();
1341 dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1342 policy->min, policy->max);
1343
 1344 memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));
 1345
1346 if (policy->min > data->min && policy->min > policy->max) {
1347 ret = -EINVAL;
1348 goto error_out;
1349 }
1350
1351 /* verify the cpu speed can be set within this limit */
1352 ret = cpufreq_driver->verify(policy);
1353 if (ret)
1354 goto error_out;
1355
 1356 /* adjust if necessary - all reasons */
1357 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1358 CPUFREQ_ADJUST, policy);
1359
1360 /* adjust if necessary - hardware incompatibility*/
1361 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1362 CPUFREQ_INCOMPATIBLE, policy);
1363
1364 /* verify the cpu speed can be set within this limit,
1365 which might be different to the first one */
1366 ret = cpufreq_driver->verify(policy);
 1367 if (ret)
 1368 goto error_out;
1369
1370 /* notification of the new policy */
1371 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1372 CPUFREQ_NOTIFY, policy);
 1373
1374 data->min = policy->min;
1375 data->max = policy->max;
1376
1377 dprintk("new min and max freqs are %u - %u kHz\n", data->min, data->max);
1378
1379 if (cpufreq_driver->setpolicy) {
1380 data->policy = policy->policy;
1381 dprintk("setting range\n");
1382 ret = cpufreq_driver->setpolicy(policy);
1383 } else {
1384 if (policy->governor != data->governor) {
1385 /* save old, working values */
1386 struct cpufreq_governor *old_gov = data->governor;
1387
1388 dprintk("governor switch\n");
1389
1390 /* end old governor */
1391 if (data->governor)
1392 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1393
1394 /* start new governor */
1395 data->governor = policy->governor;
1396 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1397 /* new governor failed, so re-start old one */
1398 dprintk("starting governor %s failed\n", data->governor->name);
1399 if (old_gov) {
1400 data->governor = old_gov;
1401 __cpufreq_governor(data, CPUFREQ_GOV_START);
1402 }
1403 ret = -EINVAL;
1404 goto error_out;
1405 }
1406 /* might be a policy change, too, so fall through */
1407 }
1408 dprintk("governor: change or update limits\n");
1409 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1410 }
1411
 1412error_out:
1413 cpufreq_debug_enable_ratelimit();
1414 return ret;
1415}
1416
1417/**
1418 * cpufreq_set_policy - set a new CPUFreq policy
1419 * @policy: policy to be set.
1420 *
1421 * Sets a new CPU frequency and voltage scaling policy.
1422 */
1423int cpufreq_set_policy(struct cpufreq_policy *policy)
1424{
1425 int ret = 0;
1426 struct cpufreq_policy *data;
1427
1428 if (!policy)
1429 return -EINVAL;
1430
1431 data = cpufreq_cpu_get(policy->cpu);
1432 if (!data)
1433 return -EINVAL;
1434
1435 lock_cpu_hotplug();
1436
 1437 /* lock this CPU */
 1438 mutex_lock(&data->lock);
1439
1440 ret = __cpufreq_set_policy(data, policy);
1441 data->user_policy.min = data->min;
1442 data->user_policy.max = data->max;
1443 data->user_policy.policy = data->policy;
1444 data->user_policy.governor = data->governor;
1445
 1446 mutex_unlock(&data->lock);
1447
1448 unlock_cpu_hotplug();
1449 cpufreq_cpu_put(data);
1450
1451 return ret;
1452}
1453EXPORT_SYMBOL(cpufreq_set_policy);
1454
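/*
 * Example (illustrative sketch, not part of the cpufreq core): the
 * read-modify-write pattern around cpufreq_get_policy() and
 * cpufreq_set_policy(), e.g. to cap a CPU. example_cap_cpu() and the
 * 1600000 kHz limit are hypothetical illustration values.
 */
static int example_cap_cpu(unsigned int cpu)
{
        struct cpufreq_policy new_policy;
        int ret;

        ret = cpufreq_get_policy(&new_policy, cpu);
        if (ret)
                return ret;

        if (new_policy.max > 1600000)
                new_policy.max = 1600000;

        return cpufreq_set_policy(&new_policy);
}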
1455
1456/**
1457 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1458 * @cpu: CPU which shall be re-evaluated
1459 *
 1460 * Useful for policy notifiers which have different necessities
1461 * at different times.
1462 */
1463int cpufreq_update_policy(unsigned int cpu)
1464{
1465 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1466 struct cpufreq_policy policy;
1467 int ret = 0;
1468
1469 if (!data)
1470 return -ENODEV;
1471
 1472 lock_cpu_hotplug();
 1473 mutex_lock(&data->lock);
1474
1475 dprintk("updating policy for CPU %u\n", cpu);
 1476 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1477 policy.min = data->user_policy.min;
1478 policy.max = data->user_policy.max;
1479 policy.policy = data->user_policy.policy;
1480 policy.governor = data->user_policy.governor;
1481
1482 /* BIOS might change freq behind our back
1483 -> ask driver for current freq and notify governors about a change */
1484 if (cpufreq_driver->get) {
1485 policy.cur = cpufreq_driver->get(cpu);
1486 if (!data->cur) {
1487 dprintk("Driver did not initialize current freq");
1488 data->cur = policy.cur;
1489 } else {
1490 if (data->cur != policy.cur)
1491 cpufreq_out_of_sync(cpu, data->cur, policy.cur);
1492 }
1493 }
1494
1495 ret = __cpufreq_set_policy(data, &policy);
1496
 1497 mutex_unlock(&data->lock);
 1498 unlock_cpu_hotplug();
1499 cpufreq_cpu_put(data);
1500 return ret;
1501}
1502EXPORT_SYMBOL(cpufreq_update_policy);
1503
1504#ifdef CONFIG_HOTPLUG_CPU
1505static int cpufreq_cpu_callback(struct notifier_block *nfb,
1506 unsigned long action, void *hcpu)
1507{
1508 unsigned int cpu = (unsigned long)hcpu;
1509 struct cpufreq_policy *policy;
1510 struct sys_device *sys_dev;
1511
1512 sys_dev = get_cpu_sysdev(cpu);
1513
1514 if (sys_dev) {
1515 switch (action) {
1516 case CPU_ONLINE:
1517 cpufreq_add_dev(sys_dev);
1518 break;
1519 case CPU_DOWN_PREPARE:
1520 /*
1521 * We attempt to put this cpu in lowest frequency
1522 * possible before going down. This will permit
1523 * hardware-managed P-State to switch other related
1524 * threads to min or higher speeds if possible.
1525 */
1526 policy = cpufreq_cpu_data[cpu];
1527 if (policy) {
1528 cpufreq_driver_target(policy, policy->min,
1529 CPUFREQ_RELATION_H);
1530 }
1531 break;
1532 case CPU_DEAD:
1533 cpufreq_remove_dev(sys_dev);
1534 break;
1535 }
1536 }
1537 return NOTIFY_OK;
1538}
1539
 1540static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
1541{
1542 .notifier_call = cpufreq_cpu_callback,
1543};
 1544#endif /* CONFIG_HOTPLUG_CPU */
1545
1546/*********************************************************************
1547 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1548 *********************************************************************/
1549
1550/**
1551 * cpufreq_register_driver - register a CPU Frequency driver
 1552 * @driver_data: A struct cpufreq_driver containing the values
1553 * submitted by the CPU Frequency driver.
1554 *
 1555 * Registers a CPU Frequency driver to this core code. This code
 1556 * returns zero on success, -EBUSY when another driver got here first
 1557 * (and isn't unregistered in the meantime).
1558 *
1559 */
1560int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1561{
1562 unsigned long flags;
1563 int ret;
1564
1565 if (!driver_data || !driver_data->verify || !driver_data->init ||
1566 ((!driver_data->setpolicy) && (!driver_data->target)))
1567 return -EINVAL;
1568
1569 dprintk("trying to register driver %s\n", driver_data->name);
1570
1571 if (driver_data->setpolicy)
1572 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1573
1574 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1575 if (cpufreq_driver) {
1576 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1577 return -EBUSY;
1578 }
1579 cpufreq_driver = driver_data;
1580 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1581
1582 ret = sysdev_driver_register(&cpu_sysdev_class,&cpufreq_sysdev_driver);
1583
1584 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1585 int i;
1586 ret = -ENODEV;
1587
1588 /* check for at least one working CPU */
1589 for (i=0; i<NR_CPUS; i++)
1590 if (cpufreq_cpu_data[i])
1591 ret = 0;
1592
1593 /* if all ->init() calls failed, unregister */
1594 if (ret) {
1595 dprintk("no CPU initialized for driver %s\n", driver_data->name);
1596 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
1597
1598 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1599 cpufreq_driver = NULL;
1600 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1601 }
1602 }
1603
1604 if (!ret) {
 1605 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1606 dprintk("driver %s up and running\n", driver_data->name);
1607 cpufreq_debug_enable_ratelimit();
1608 }
1609
1610 return (ret);
1611}
1612EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1613
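/*
 * Example (illustrative sketch, not part of the cpufreq core): skeleton of
 * the registration sequence checked for above. ->verify and ->init are
 * mandatory, plus either ->setpolicy or ->target. All example_* callbacks
 * are hypothetical placeholders for a real low-level driver; example_target
 * could be the ->target sketch shown after cpufreq_notify_transition().
 */
static struct cpufreq_driver example_driver = {
        .name   = "example",
        .owner  = THIS_MODULE,
        .init   = example_cpu_init,     /* fills in policy->cpuinfo and cur */
        .verify = example_verify,       /* clamps min/max to hardware limits */
        .target = example_target,       /* programs the requested frequency */
        .get    = example_get,          /* optional: reads the current freq */
};

static int __init example_module_init(void)
{
        return cpufreq_register_driver(&example_driver);
}

static void __exit example_module_exit(void)
{
        cpufreq_unregister_driver(&example_driver);
}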
1614
1615/**
1616 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1617 *
 1618 * Unregister the current CPUFreq driver. Only call this if you have
1619 * the right to do so, i.e. if you have succeeded in initialising before!
1620 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1621 * currently not initialised.
1622 */
1623int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1624{
1625 unsigned long flags;
1626
1627 cpufreq_debug_disable_ratelimit();
1628
1629 if (!cpufreq_driver || (driver != cpufreq_driver)) {
1630 cpufreq_debug_enable_ratelimit();
1631 return -EINVAL;
1632 }
1633
1634 dprintk("unregistering driver %s\n", driver->name);
1635
1636 sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
 1637 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1638
1639 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1640 cpufreq_driver = NULL;
1641 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1642
1643 return 0;
1644}
1645EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);