Linux 3.7-rc5
[linux-2.6-block.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
1da177e4
LT
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/notifier.h>
22#include <linux/cpufreq.h>
23#include <linux/delay.h>
24#include <linux/interrupt.h>
25#include <linux/spinlock.h>
26#include <linux/device.h>
27#include <linux/slab.h>
28#include <linux/cpu.h>
29#include <linux/completion.h>
3fc54d37 30#include <linux/mutex.h>
e00e56df 31#include <linux/syscore_ops.h>
1da177e4 32
6f4f2723
TR
33#include <trace/events/power.h>
34
1da177e4 35/**
cd878479 36 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
37 * level driver of CPUFreq support, and its spinlock. This lock
38 * also protects the cpufreq_cpu_data array.
39 */
7d5e350f 40static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
42#ifdef CONFIG_HOTPLUG_CPU
43/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 44static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 45#endif
1da177e4
LT
46static DEFINE_SPINLOCK(cpufreq_driver_lock);
47
5a01f2e8
VP
48/*
49 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
50 * all cpufreq/hotplug/workqueue/etc related lock issues.
51 *
52 * The rules for this semaphore:
53 * - Any routine that wants to read from the policy structure will
54 * do a down_read on this semaphore.
55 * - Any routine that will write to the policy structure and/or may take away
56 * the policy altogether (eg. CPU hotplug), will hold this lock in write
57 * mode before doing so.
58 *
59 * Additional rules:
60 * - All holders of the lock should check to make sure that the CPU they
61 * are concerned with are online after they get the lock.
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
/*
 * Generate lock_policy_rwsem_read()/lock_policy_rwsem_write().  Each maps
 * @cpu to its policy-owning CPU via cpufreq_policy_cpu and takes that CPU's
 * rwsem in the requested mode.  Returns 0 with the sem held, or -1 (sem
 * released again) when the CPU went offline while we were acquiring it.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode					\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	/* -1 means no policy has ever been set up for this CPU */	\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		/* lost a hotplug race: drop the sem and report failure */ \
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);

lock_policy_rwsem(write, cpu);

/* Release the policy rwsem taken by lock_policy_rwsem_read(). */
static void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}

/* Release the policy rwsem taken by lock_policy_rwsem_write(). */
static void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
5a01f2e8
VP
102
103
1da177e4 104/* internal prototypes */
29464f28
DJ
105static int __cpufreq_governor(struct cpufreq_policy *policy,
106 unsigned int event);
5a01f2e8 107static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 108static void handle_update(struct work_struct *work);
1da177e4
LT
109
110/**
32ee8c3e
DJ
111 * Two notifier lists: the "policy" list is involved in the
112 * validation process for a new CPU frequency policy; the
1da177e4
LT
113 * "transition" list for kernel code that needs to handle
114 * changes to devices when the CPU clock speed changes.
115 * The mutex locks both lists.
116 */
e041c683 117static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 118static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 119
/* Set once the SRCU notifier head below has been initialised; checked by
 * cpufreq_register_notifier() to catch too-early registration attempts. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: run before any driver registration can happen */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 128
a7b422cd
KRW
/* Global "cpufreq is disabled" flag; read-mostly after early boot. */
static int off __read_mostly;

/* Returns non-zero when cpufreq has been administratively disabled. */
int cpufreq_disabled(void)
{
	return off;
}

/* Permanently disable cpufreq for this boot; there is no re-enable path. */
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 138static LIST_HEAD(cpufreq_governor_list);
29464f28 139static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 140
/*
 * __cpufreq_cpu_get - look up and pin the policy for @cpu.
 *
 * Takes a reference on the cpufreq driver module and, unless @sysfs is
 * true, a kobject reference on the policy.  @sysfs callers already hold an
 * implicit kobject reference via the open sysfs file, so only the module
 * reference is taken for them.
 *
 * Returns the policy with references held, or NULL on failure (no driver,
 * no policy for this CPU, or reference acquisition failed).  Callers must
 * balance with cpufreq_cpu_put()/cpufreq_cpu_put_sysfs().
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
178
/* Pin the policy of @cpu: takes module + kobject references.  NULL on
 * failure.  Pair with cpufreq_cpu_put(). */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/* Sysfs-path variant: the open file already pins the kobject, so only the
 * driver-module reference is taken.  Pair with cpufreq_cpu_put_sysfs(). */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}

/* Drop the references taken by __cpufreq_cpu_get(). */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}

/* Release a policy pinned with cpufreq_cpu_get(). */
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/* Release a policy pinned with cpufreq_cpu_get_sysfs(). */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 207
1da177e4
LT
208/*********************************************************************
209 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
210 *********************************************************************/
211
212/**
213 * adjust_jiffies - adjust the system "loops_per_jiffy"
214 *
215 * This function alters the system "loops_per_jiffy" for the clock
216 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 217 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
218 * per-CPU loops_per_jiffy value wherever possible.
219 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was calibrated at,
 * captured on the first frequency transition. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* Drivers with constant loops_per_jiffy need no rescaling. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* First call: remember the calibration point to scale against. */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	/* Rescale after a real change, or on suspend/resume transitions. */
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: each CPU may scale differently, so the global value is left alone
 * and the arch per-CPU loops_per_jiffy is used instead. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
249
250
251/**
e4472cb3
DJ
252 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
253 * on frequency transition.
1da177e4 254 *
e4472cb3
DJ
255 * This function calls the transition notifiers and the "adjust_jiffies"
256 * function. It is called twice on all CPU frequency changes that have
32ee8c3e 257 * external effects.
1da177e4
LT
258 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	/* Notifiers may sleep (SRCU chain); must not be called atomically. */
	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's view of the old freq */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* record the now-current frequency in the policy */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
305
306
307
308/*********************************************************************
309 * SYSFS INTERFACE *
310 *********************************************************************/
311
3bcb09a3
JF
312static struct cpufreq_governor *__find_governor(const char *str_governor)
313{
314 struct cpufreq_governor *t;
315
316 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 317 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
318 return t;
319
320 return NULL;
321}
322
1da177e4
LT
323/**
324 * cpufreq_parse_governor - parse a governor string
325 */
/*
 * Parse @str_governor.  For setpolicy drivers, only "performance" and
 * "powersave" are valid and *policy is set.  For target drivers, the
 * registered governor list is searched (loading "cpufreq_<name>" on
 * demand) and *governor is set.  Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* Drop the mutex: request_module() may sleep and the
			 * loaded module re-takes it to register itself. */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
371
372
1da177e4 373/**
e08f5f5b
GS
374 * cpufreq_per_cpu_attr_read() / show_##file_name() -
375 * print out cpufreq information
1da177e4
LT
376 *
377 * Write out information from cpufreq_driver->policy[cpu]; object must be
378 * "unsigned int".
379 */
380
32ee8c3e
DJ
/* Generate a sysfs show_<file_name>() helper printing the given unsigned
 * int member of struct cpufreq_policy, one value per line. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
394
e08f5f5b
GS
/* Forward declaration: defined later in this file. */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/*
 * Generate a sysfs store_<file_name>() helper: parse one unsigned int
 * from @buf into a copy of the current policy, apply it via
 * __cpufreq_set_policy(), and mirror the (possibly clamped) result into
 * user_policy.  Returns @count on success, negative errno on failure.
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
424
425/**
426 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
427 */
905d77cd
DJ
428static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
429 char *buf)
1da177e4 430{
5a01f2e8 431 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
432 if (!cur_freq)
433 return sprintf(buf, "<unknown>");
434 return sprintf(buf, "%u\n", cur_freq);
435}
436
437
438/**
439 * show_scaling_governor - show the current policy for the specified CPU
440 */
905d77cd 441static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 442{
29464f28 443 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
444 return sprintf(buf, "powersave\n");
445 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
446 return sprintf(buf, "performance\n");
447 else if (policy->governor)
29464f28
DJ
448 return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
449 policy->governor->name);
1da177e4
LT
450 return -EINVAL;
451}
452
453
454/**
455 * store_scaling_governor - store policy for the specified CPU
456 */
905d77cd
DJ
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	/* Start from a snapshot of the current policy. */
	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* %15s: bounded read, leaves room for the NUL terminator. */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	/* Remember the user's choice for later restores. */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
488
489/**
490 * show_scaling_driver - show the cpufreq driver currently loaded
491 */
/* Print the name of the currently registered cpufreq driver. */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}
496
497/**
498 * show_scaling_available_governors - show the available CPUfreq governors
499 */
905d77cd
DJ
500static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
501 char *buf)
1da177e4
LT
502{
503 ssize_t i = 0;
504 struct cpufreq_governor *t;
505
506 if (!cpufreq_driver->target) {
507 i += sprintf(buf, "performance powersave");
508 goto out;
509 }
510
511 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
512 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
513 - (CPUFREQ_NAME_LEN + 2)))
1da177e4
LT
514 goto out;
515 i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
516 }
7d5e350f 517out:
1da177e4
LT
518 i += sprintf(&buf[i], "\n");
519 return i;
520}
e8628dd0 521
835481d9 522static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
523{
524 ssize_t i = 0;
525 unsigned int cpu;
526
835481d9 527 for_each_cpu(cpu, mask) {
1da177e4
LT
528 if (i)
529 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
530 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
531 if (i >= (PAGE_SIZE - 5))
29464f28 532 break;
1da177e4
LT
533 }
534 i += sprintf(&buf[i], "\n");
535 return i;
536}
537
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	/* drivers that don't fill related_cpus fall back to the online set */
	if (cpumask_empty(policy->related_cpus))
		return show_cpus(policy->cpus, buf);
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
556
/* Forward a user-requested frequency to the governor's store_setspeed
 * hook (e.g. the userspace governor).  -EINVAL when unsupported. */
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

/* Print the governor's notion of the set speed, or "<unsupported>" for
 * governors without a show_setspeed hook. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 582
e2f74f35
TR
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 *
 * (The original kerneldoc wrongly named this "show_scaling_driver".)
 * Falls back to cpuinfo.max_freq when the driver has no bios_limit hook
 * or the hook reports an error.
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
597
6dad2a29
BP
598cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
599cpufreq_freq_attr_ro(cpuinfo_min_freq);
600cpufreq_freq_attr_ro(cpuinfo_max_freq);
601cpufreq_freq_attr_ro(cpuinfo_transition_latency);
602cpufreq_freq_attr_ro(scaling_available_governors);
603cpufreq_freq_attr_ro(scaling_driver);
604cpufreq_freq_attr_ro(scaling_cur_freq);
605cpufreq_freq_attr_ro(bios_limit);
606cpufreq_freq_attr_ro(related_cpus);
607cpufreq_freq_attr_ro(affected_cpus);
608cpufreq_freq_attr_rw(scaling_min_freq);
609cpufreq_freq_attr_rw(scaling_max_freq);
610cpufreq_freq_attr_rw(scaling_governor);
611cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 612
/* Attributes created for every policy kobject; driver-specific and
 * conditional attributes (cpuinfo_cur_freq, scaling_cur_freq, bios_limit)
 * are added separately in cpufreq_add_dev_interface(). */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
627
8aa84ad8
TR
628struct kobject *cpufreq_global_kobject;
629EXPORT_SYMBOL(cpufreq_global_kobject);
630
29464f28
DJ
631#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
632#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 633
/* sysfs read dispatcher: pin the policy, take its rwsem for reading, and
 * call the attribute's show hook. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	/* re-acquire via the sysfs path to take the module reference */
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
657
905d77cd
DJ
658static ssize_t store(struct kobject *kobj, struct attribute *attr,
659 const char *buf, size_t count)
1da177e4 660{
905d77cd
DJ
661 struct cpufreq_policy *policy = to_policy(kobj);
662 struct freq_attr *fattr = to_attr(attr);
a07530b4 663 ssize_t ret = -EINVAL;
a9144436 664 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 665 if (!policy)
a07530b4 666 goto no_policy;
5a01f2e8
VP
667
668 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 669 goto fail;
5a01f2e8 670
e08f5f5b
GS
671 if (fattr->store)
672 ret = fattr->store(policy, buf, count);
673 else
674 ret = -EIO;
675
5a01f2e8 676 unlock_policy_rwsem_write(policy->cpu);
a07530b4 677fail:
a9144436 678 cpufreq_cpu_put_sysfs(policy);
a07530b4 679no_policy:
1da177e4
LT
680 return ret;
681}
682
/* kobject release hook: wakes whoever is waiting in wait_for_completion()
 * on kobj_unregister so teardown can free the policy safely. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
689
/* Route all policy-kobject attribute I/O through show()/store() above. */
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

/* kobject type for per-policy sysfs directories (cpuN/cpufreq). */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
700
4bfa042c
TR
701/*
702 * Returns:
703 * Negative: Failure
704 * 0: Success
705 * Positive: When we have a managed CPU and the sysfs got symlinked
706 */
cf3289d0
AC
static int cpufreq_add_dev_policy(unsigned int cpu,
				  struct cpufreq_policy *policy,
				  struct device *dev)
{
	int ret = 0;
#ifdef CONFIG_SMP
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;

	/* Re-install the governor this CPU used before it was unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;

		if (cpu == j)
			continue;

		/* Check for existing affected CPUs.
		 * They may not be aware of it due to CPU Hotplug.
		 * cpufreq_cpu_put is called when the device is removed
		 * in __cpufreq_remove_dev()
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu: must drop our write lock
			 * before redirecting policy_cpu, then retake it on
			 * the managing CPU's rwsem. */
			unlock_policy_rwsem_write(cpu);
			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0) {
				/* Should not go through policy unlock path */
				if (cpufreq_driver->exit)
					cpufreq_driver->exit(policy);
				cpufreq_cpu_put(managed_policy);
				return -EBUSY;
			}

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			pr_debug("CPU already managed, adding link\n");
			ret = sysfs_create_link(&dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				cpufreq_cpu_put(managed_policy);
			/*
			 * Success. We only needed to be added to the mask.
			 * Call driver->exit() because only the cpu parent of
			 * the kobj needed to call init().
			 */
			if (cpufreq_driver->exit)
				cpufreq_driver->exit(policy);

			/* 1 = managed CPU, symlink created (see header) */
			if (!ret)
				return 1;
			else
				return ret;
		}
	}
#endif
	return ret;
}
780
781
/* symlink affected CPUs: give every other online CPU in the policy's mask
 * a cpuN/cpufreq symlink to this policy's kobject, taking a policy
 * reference per link (dropped again on link-creation failure or removal). */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		/* reference held on behalf of the symlink */
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
810
cf3289d0
AC
/* Create the policy's sysfs directory and attributes, publish the policy
 * in the per-CPU tables, symlink sibling CPUs, and apply the initial
 * policy.  On sysfs failure the kobject is torn down and we wait for its
 * release before returning. */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* conditional core attributes, depending on driver capabilities */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for all CPUs it covers */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	/* block until cpufreq_sysfs_release() signals the kobj is gone */
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
885
1da177e4
LT
886
887/**
888 * cpufreq_add_dev - add a CPU device
889 *
32ee8c3e 890 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
891 *
892 * The Oracle says: try running cpufreq registration/unregistration concurrently
893 * with with cpu hotplugging and all hell will break loose. Tried to clean this
894 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 895 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret = 0, found = 0;
	struct cpufreq_policy *policy;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	/* pin the driver module for the lifetime of this registration */
	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	ret = -ENOMEM;
	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
#ifdef CONFIG_HOTPLUG_CPU
	/* prefer the governor already used by a related sibling CPU */
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cp->governor &&
		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
			policy->governor = cp->governor;
			found = 1;
			break;
		}
	}
#endif
	if (!found)
		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_unlock_policy;
	}
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	ret = cpufreq_add_dev_policy(cpu, policy, dev);
	if (ret) {
		if (ret > 0)
			/* This is a managed cpu, symlink created,
			   exit with 0 */
			ret = 0;
		goto err_unlock_policy;
	}

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;


err_out_unregister:
	/* unpublish the policy from every CPU it was installed on */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_unlock_policy:
	unlock_policy_rwsem_write(cpu);
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}
1020
1021
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
#ifdef CONFIG_SMP
	struct device *cpu_dev;
	unsigned int j;
#endif

	pr_debug("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	/* disconnect this CPU from the policy before dropping the lock */
	per_cpu(cpufreq_cpu_data, cpu) = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		pr_debug("removing link\n");
		cpumask_clear_cpu(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		kobj = &dev->kobj;
		cpufreq_cpu_put(data);
		/* drop the rwsem before touching sysfs */
		unlock_policy_rwsem_write(cpu);
		sysfs_remove_link(kobj, "cpufreq");
		return 0;
	}
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	/* remember the governor name so it can be restored if this CPU
	 * comes back online later */
	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
			CPUFREQ_NAME_LEN);
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
	 * the sysfs links afterwards.
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			per_cpu(cpufreq_cpu_data, j) = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			pr_debug("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			strncpy(per_cpu(cpufreq_cpu_governor, j),
				data->governor->name, CPUFREQ_NAME_LEN);
#endif
			cpu_dev = get_cpu_device(j);
			kobj = &cpu_dev->kobj;
			/* rwsem is released across sysfs_remove_link() and
			 * re-acquired afterwards */
			unlock_policy_rwsem_write(cpu);
			sysfs_remove_link(kobj, "cpufreq");
			lock_policy_rwsem_write(cpu);
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	/* snapshot the kobject and completion pointers before releasing
	 * the rwsem and the final kobject reference */
	kobj = &data->kobj;
	cmp = &data->kobj_unregister;
	unlock_policy_rwsem_write(cpu);
	kobject_put(kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");

	lock_policy_rwsem_write(cpu);
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);
	unlock_policy_rwsem_write(cpu);

#ifdef CONFIG_HOTPLUG_CPU
	/* when the CPU which is the parent of the kobj is hotplugged
	 * offline, check for siblings, and create cpufreq sysfs interface
	 * and symlinks
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		/* first sibling now owns the new sysfs dir */
		cpumask_clear_cpu(cpu, data->cpus);
		cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);

		/* finally remove our own symlink; note the recursive call
		 * expects (and releases) the rwsem, hence the re-lock */
		lock_policy_rwsem_write(cpu);
		__cpufreq_remove_dev(dev, sif);
	}
#endif

	free_cpumask_var(data->related_cpus);
	free_cpumask_var(data->cpus);
	kfree(data);

	return 0;
}
1156
1157
8a25a2fd 1158static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1159{
8a25a2fd 1160 unsigned int cpu = dev->id;
5a01f2e8 1161 int retval;
ec28297a
VP
1162
1163 if (cpu_is_offline(cpu))
1164 return 0;
1165
5a01f2e8
VP
1166 if (unlikely(lock_policy_rwsem_write(cpu)))
1167 BUG();
1168
8a25a2fd 1169 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1170 return retval;
1171}
1172
1173
65f27f38 1174static void handle_update(struct work_struct *work)
1da177e4 1175{
65f27f38
DH
1176 struct cpufreq_policy *policy =
1177 container_of(work, struct cpufreq_policy, update);
1178 unsigned int cpu = policy->cpu;
2d06d8c4 1179 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1180 cpufreq_update_policy(cpu);
1181}
1182
1183/**
1184 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1185 * @cpu: cpu number
1186 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1187 * @new_freq: CPU frequency the CPU actually runs at
1188 *
29464f28
DJ
1189 * We adjust to current frequency first, and need to clean up later.
1190 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1191 */
e08f5f5b
GS
1192static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1193 unsigned int new_freq)
1da177e4
LT
1194{
1195 struct cpufreq_freqs freqs;
1196
2d06d8c4 1197 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1198 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1199
1200 freqs.cpu = cpu;
1201 freqs.old = old_freq;
1202 freqs.new = new_freq;
1203 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1204 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1205}
1206
1207
32ee8c3e 1208/**
4ab70df4 1209 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1210 * @cpu: CPU number
1211 *
1212 * This is the last known freq, without actually getting it from the driver.
1213 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1214 */
1215unsigned int cpufreq_quick_get(unsigned int cpu)
1216{
1217 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1218 unsigned int ret_freq = 0;
95235ca2
VP
1219
1220 if (policy) {
e08f5f5b 1221 ret_freq = policy->cur;
95235ca2
VP
1222 cpufreq_cpu_put(policy);
1223 }
1224
4d34a67d 1225 return ret_freq;
95235ca2
VP
1226}
1227EXPORT_SYMBOL(cpufreq_quick_get);
1228
3d737108
JB
1229/**
1230 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1231 * @cpu: CPU number
1232 *
1233 * Just return the max possible frequency for a given CPU.
1234 */
1235unsigned int cpufreq_quick_get_max(unsigned int cpu)
1236{
1237 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1238 unsigned int ret_freq = 0;
1239
1240 if (policy) {
1241 ret_freq = policy->max;
1242 cpufreq_cpu_put(policy);
1243 }
1244
1245 return ret_freq;
1246}
1247EXPORT_SYMBOL(cpufreq_quick_get_max);
1248
95235ca2 1249
5a01f2e8 1250static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1251{
7a6aedfa 1252 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1253 unsigned int ret_freq = 0;
1da177e4 1254
1da177e4 1255 if (!cpufreq_driver->get)
4d34a67d 1256 return ret_freq;
1da177e4 1257
e08f5f5b 1258 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1259
e08f5f5b
GS
1260 if (ret_freq && policy->cur &&
1261 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1262 /* verify no discrepancy between actual and
1263 saved value exists */
1264 if (unlikely(ret_freq != policy->cur)) {
1265 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1266 schedule_work(&policy->update);
1267 }
1268 }
1269
4d34a67d 1270 return ret_freq;
5a01f2e8 1271}
1da177e4 1272
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		return 0;

	if (likely(!lock_policy_rwsem_read(cpu))) {
		freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1300
8a25a2fd
KS
/* Hooks cpufreq device add/removal into the generic cpu subsystem. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1307
1da177e4 1308
42d4dc3f 1309/**
e00e56df
RW
1310 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1311 *
1312 * This function is only executed for the boot processor. The other CPUs
1313 * have been put offline by means of CPU hotplug.
42d4dc3f 1314 */
e00e56df 1315static int cpufreq_bp_suspend(void)
42d4dc3f 1316{
e08f5f5b 1317 int ret = 0;
4bc5d341 1318
e00e56df 1319 int cpu = smp_processor_id();
42d4dc3f
BH
1320 struct cpufreq_policy *cpu_policy;
1321
2d06d8c4 1322 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1323
e00e56df 1324 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1325 cpu_policy = cpufreq_cpu_get(cpu);
1326 if (!cpu_policy)
e00e56df 1327 return 0;
42d4dc3f
BH
1328
1329 if (cpufreq_driver->suspend) {
7ca64e2d 1330 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1331 if (ret)
42d4dc3f
BH
1332 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1333 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1334 }
1335
42d4dc3f 1336 cpufreq_cpu_put(cpu_policy);
c9060494 1337 return ret;
42d4dc3f
BH
1338}
1339
1da177e4 1340/**
e00e56df 1341 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1342 *
1343 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1344 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1345 * restored. It will verify that the current freq is in sync with
1346 * what we believe it to be. This is a bit later than when it
1347 * should be, but nonethteless it's better than calling
1348 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1349 *
1350 * This function is only executed for the boot CPU. The other CPUs have not
1351 * been turned on yet.
1da177e4 1352 */
e00e56df 1353static void cpufreq_bp_resume(void)
1da177e4 1354{
e08f5f5b 1355 int ret = 0;
4bc5d341 1356
e00e56df 1357 int cpu = smp_processor_id();
1da177e4
LT
1358 struct cpufreq_policy *cpu_policy;
1359
2d06d8c4 1360 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1361
e00e56df 1362 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1363 cpu_policy = cpufreq_cpu_get(cpu);
1364 if (!cpu_policy)
e00e56df 1365 return;
1da177e4
LT
1366
1367 if (cpufreq_driver->resume) {
1368 ret = cpufreq_driver->resume(cpu_policy);
1369 if (ret) {
1370 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1371 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1372 goto fail;
1da177e4
LT
1373 }
1374 }
1375
1da177e4 1376 schedule_work(&cpu_policy->update);
ce6c3997 1377
c9060494 1378fail:
1da177e4 1379 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1380}
1381
e00e56df
RW
/* Boot-CPU suspend/resume handling, run via the syscore mechanism. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1386
1387
1388/*********************************************************************
1389 * NOTIFIER LISTS INTERFACE *
1390 *********************************************************************/
1391
1392/**
1393 * cpufreq_register_notifier - register a driver with cpufreq
1394 * @nb: notifier function to register
1395 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1396 *
32ee8c3e 1397 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1398 * are notified about clock rate changes (once before and once after
1399 * the transition), or a list of drivers that are notified about
1400 * changes in cpufreq policy.
1401 *
1402 * This function may sleep, and has the same return conditions as
e041c683 1403 * blocking_notifier_chain_register.
1da177e4
LT
1404 */
1405int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1406{
1407 int ret;
1408
74212ca4
CEB
1409 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1410
1da177e4
LT
1411 switch (list) {
1412 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1413 ret = srcu_notifier_chain_register(
e041c683 1414 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1415 break;
1416 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1417 ret = blocking_notifier_chain_register(
1418 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1419 break;
1420 default:
1421 ret = -EINVAL;
1422 }
1da177e4
LT
1423
1424 return ret;
1425}
1426EXPORT_SYMBOL(cpufreq_register_notifier);
1427
1428
1429/**
1430 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1431 * @nb: notifier block to be unregistered
1432 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1433 *
1434 * Remove a driver from the CPU frequency notifier list.
1435 *
1436 * This function may sleep, and has the same return conditions as
e041c683 1437 * blocking_notifier_chain_unregister.
1da177e4
LT
1438 */
1439int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1440{
1441 int ret;
1442
1da177e4
LT
1443 switch (list) {
1444 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1445 ret = srcu_notifier_chain_unregister(
e041c683 1446 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1447 break;
1448 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1449 ret = blocking_notifier_chain_unregister(
1450 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1451 break;
1452 default:
1453 ret = -EINVAL;
1454 }
1da177e4
LT
1455
1456 return ret;
1457}
1458EXPORT_SYMBOL(cpufreq_unregister_notifier);
1459
1460
1461/*********************************************************************
1462 * GOVERNORS *
1463 *********************************************************************/
1464
1465
1466int __cpufreq_driver_target(struct cpufreq_policy *policy,
1467 unsigned int target_freq,
1468 unsigned int relation)
1469{
1470 int retval = -EINVAL;
c32b6b8e 1471
a7b422cd
KRW
1472 if (cpufreq_disabled())
1473 return -ENODEV;
1474
2d06d8c4 1475 pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
1da177e4
LT
1476 target_freq, relation);
1477 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1478 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1479
1da177e4
LT
1480 return retval;
1481}
1482EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1483
1da177e4
LT
1484int cpufreq_driver_target(struct cpufreq_policy *policy,
1485 unsigned int target_freq,
1486 unsigned int relation)
1487{
f1829e4a 1488 int ret = -EINVAL;
1da177e4
LT
1489
1490 policy = cpufreq_cpu_get(policy->cpu);
1491 if (!policy)
f1829e4a 1492 goto no_policy;
1da177e4 1493
5a01f2e8 1494 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1495 goto fail;
1da177e4
LT
1496
1497 ret = __cpufreq_driver_target(policy, target_freq, relation);
1498
5a01f2e8 1499 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1500
f1829e4a 1501fail:
1da177e4 1502 cpufreq_cpu_put(policy);
f1829e4a 1503no_policy:
1da177e4
LT
1504 return ret;
1505}
1506EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1507
bf0b90e3 1508int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1509{
1510 int ret = 0;
1511
1512 policy = cpufreq_cpu_get(policy->cpu);
1513 if (!policy)
1514 return -EINVAL;
1515
bf0b90e3 1516 if (cpu_online(cpu) && cpufreq_driver->getavg)
1517 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1518
dfde5d62
VP
1519 cpufreq_cpu_put(policy);
1520 return ret;
1521}
5a01f2e8 1522EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1523
153d7f3f 1524/*
153d7f3f
AV
1525 * when "event" is CPUFREQ_GOV_LIMITS
1526 */
1da177e4 1527
e08f5f5b
GS
/*
 * __cpufreq_governor - forward a governor event (START/STOP/LIMITS, ...)
 * to @policy's governor, keeping a module reference on the governor from
 * a successful START until the matching successful STOP.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* if the chosen governor cannot tolerate this hardware's transition
	 * latency, fall back to the performance governor (when built in) */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1574
1575
1da177e4
LT
1576int cpufreq_register_governor(struct cpufreq_governor *governor)
1577{
3bcb09a3 1578 int err;
1da177e4
LT
1579
1580 if (!governor)
1581 return -EINVAL;
1582
a7b422cd
KRW
1583 if (cpufreq_disabled())
1584 return -ENODEV;
1585
3fc54d37 1586 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1587
3bcb09a3
JF
1588 err = -EBUSY;
1589 if (__find_governor(governor->name) == NULL) {
1590 err = 0;
1591 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1592 }
1da177e4 1593
32ee8c3e 1594 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1595 return err;
1da177e4
LT
1596}
1597EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1598
1599
/**
 * cpufreq_unregister_governor - remove a governor from the global list
 * @governor: governor being unregistered
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	/* clear the saved-governor name on offline CPUs so they do not try
	 * to restore a governor that is going away */
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1627
1628
1629
1630/*********************************************************************
1631 * POLICY INTERFACE *
1632 *********************************************************************/
1633
1634/**
1635 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1636 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1637 * is written
1da177e4
LT
1638 *
1639 * Reads the current cpufreq policy.
1640 */
1641int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1642{
1643 struct cpufreq_policy *cpu_policy;
1644 if (!policy)
1645 return -EINVAL;
1646
1647 cpu_policy = cpufreq_cpu_get(cpu);
1648 if (!cpu_policy)
1649 return -EINVAL;
1650
1da177e4 1651 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1652
1653 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1654 return 0;
1655}
1656EXPORT_SYMBOL(cpufreq_get_policy);
1657
1658
/*
 * __cpufreq_set_policy - validate and apply a new policy
 * data   : current policy.
 * policy : policy to be set.
 *
 * Verifies the requested limits with the driver and the policy notifier
 * chain, commits the new min/max, and then either calls the driver's
 * ->setpolicy or performs a governor switch / limits update.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* reject a range that does not even overlap the current limits */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1746
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* re-evaluate starting from the user-set limits, not the
	 * possibly-adjusted current ones */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1801
/* CPU hotplug notifier: create or tear down the cpufreq interface as
 * CPUs come online or go down. */
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			/* __cpufreq_remove_dev() expects the rwsem held on
			 * entry and releases it itself */
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(dev, NULL);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			/* the down was aborted: re-create the interface
			 * removed in DOWN_PREPARE */
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}
1830
/* Hotplug notifier block, registered in cpufreq_register_driver(). */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
    .notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1834
1835/*********************************************************************
1836 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1837 *********************************************************************/
1838
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values#
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* a usable driver must verify limits and either set a policy
	 * directly (->setpolicy) or support target frequencies (->target) */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* only one driver may be registered at a time */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1910
1911
1912/**
1913 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1914 *
32ee8c3e 1915 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
1916 * the right to do so, i.e. if you have succeeded in initialising before!
1917 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1918 * currently not initialised.
1919 */
221dee28 1920int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
1921{
1922 unsigned long flags;
1923
2d06d8c4 1924 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 1925 return -EINVAL;
1da177e4 1926
2d06d8c4 1927 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 1928
8a25a2fd 1929 subsys_interface_unregister(&cpufreq_interface);
65edc68c 1930 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4
LT
1931
1932 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1933 cpufreq_driver = NULL;
1934 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1935
1936 return 0;
1937}
1938EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1939
1940static int __init cpufreq_core_init(void)
1941{
1942 int cpu;
1943
a7b422cd
KRW
1944 if (cpufreq_disabled())
1945 return -ENODEV;
1946
5a01f2e8 1947 for_each_possible_cpu(cpu) {
f1625066 1948 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
1949 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1950 }
8aa84ad8 1951
8a25a2fd 1952 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 1953 BUG_ON(!cpufreq_global_kobject);
e00e56df 1954 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 1955
5a01f2e8
VP
1956 return 0;
1957}
5a01f2e8 1958core_initcall(cpufreq_core_init);