cpufreq: Revert "cpufreq: Don't use cpu removed during cpufreq_driver_unregister"
[linux-2.6-block.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
7d5e350f 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
1da177e4
LT
48static DEFINE_SPINLOCK(cpufreq_driver_lock);
49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
62 * - All holders of the lock should check to make sure that the CPU they
63 * are concerned with are online after they get the lock.
64 * - Governor routines that can be called in cpufreq hotplug path should not
65 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
66 * - Lock should not be held across
67 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 68 */
f1625066 69static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
70static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
71
72#define lock_policy_rwsem(mode, cpu) \
226528c6 73static int lock_policy_rwsem_##mode \
5a01f2e8
VP
74(int cpu) \
75{ \
f1625066 76 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
77 BUG_ON(policy_cpu == -1); \
78 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
79 if (unlikely(!cpu_online(cpu))) { \
80 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
81 return -1; \
82 } \
83 \
84 return 0; \
85}
86
87lock_policy_rwsem(read, cpu);
5a01f2e8
VP
88
89lock_policy_rwsem(write, cpu);
5a01f2e8 90
226528c6 91static void unlock_policy_rwsem_read(int cpu)
5a01f2e8 92{
f1625066 93 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
5a01f2e8
VP
94 BUG_ON(policy_cpu == -1);
95 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
96}
5a01f2e8 97
226528c6 98static void unlock_policy_rwsem_write(int cpu)
5a01f2e8 99{
f1625066 100 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
5a01f2e8
VP
101 BUG_ON(policy_cpu == -1);
102 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
103}
5a01f2e8
VP
104
105
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
112/**
32ee8c3e
DJ
113 * Two notifier lists: the "policy" list is involved in the
114 * validation process for a new CPU frequency policy; the
1da177e4
LT
115 * "transition" list for kernel code that needs to handle
116 * changes to devices when the CPU clock speed changes.
117 * The mutex locks both lists.
118 */
e041c683 119static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 120static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 121
74212ca4 122static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
123static int __init init_cpufreq_transition_notifier_list(void)
124{
125 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 126 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
127 return 0;
128}
b3438f82 129pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 130
a7b422cd 131static int off __read_mostly;
da584455 132static int cpufreq_disabled(void)
a7b422cd
KRW
133{
134 return off;
135}
136void disable_cpufreq(void)
137{
138 off = 1;
139}
1da177e4 140static LIST_HEAD(cpufreq_governor_list);
29464f28 141static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 142
a9144436 143static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4
LT
144{
145 struct cpufreq_policy *data;
146 unsigned long flags;
147
7a6aedfa 148 if (cpu >= nr_cpu_ids)
1da177e4
LT
149 goto err_out;
150
151 /* get the cpufreq driver */
152 spin_lock_irqsave(&cpufreq_driver_lock, flags);
153
154 if (!cpufreq_driver)
155 goto err_out_unlock;
156
157 if (!try_module_get(cpufreq_driver->owner))
158 goto err_out_unlock;
159
160
161 /* get the CPU */
7a6aedfa 162 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
163
164 if (!data)
165 goto err_out_put_module;
166
a9144436 167 if (!sysfs && !kobject_get(&data->kobj))
1da177e4
LT
168 goto err_out_put_module;
169
1da177e4 170 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
171 return data;
172
7d5e350f 173err_out_put_module:
1da177e4 174 module_put(cpufreq_driver->owner);
7d5e350f 175err_out_unlock:
1da177e4 176 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 177err_out:
1da177e4
LT
178 return NULL;
179}
a9144436
SB
180
181struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
182{
d5aaffa9
DB
183 if (cpufreq_disabled())
184 return NULL;
185
a9144436
SB
186 return __cpufreq_cpu_get(cpu, false);
187}
1da177e4
LT
188EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
189
a9144436
SB
190static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
191{
192 return __cpufreq_cpu_get(cpu, true);
193}
194
195static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
196{
197 if (!sysfs)
198 kobject_put(&data->kobj);
199 module_put(cpufreq_driver->owner);
200}
7d5e350f 201
1da177e4
LT
202void cpufreq_cpu_put(struct cpufreq_policy *data)
203{
d5aaffa9
DB
204 if (cpufreq_disabled())
205 return;
206
a9144436 207 __cpufreq_cpu_put(data, false);
1da177e4
LT
208}
209EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
210
a9144436
SB
211static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
212{
213 __cpufreq_cpu_put(data, true);
214}
1da177e4 215
1da177e4
LT
216/*********************************************************************
217 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
218 *********************************************************************/
219
220/**
221 * adjust_jiffies - adjust the system "loops_per_jiffy"
222 *
223 * This function alters the system "loops_per_jiffy" for the clock
224 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 225 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
226 * per-CPU loops_per_jiffy value wherever possible.
227 */
228#ifndef CONFIG_SMP
229static unsigned long l_p_j_ref;
230static unsigned int l_p_j_ref_freq;
231
858119e1 232static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
233{
234 if (ci->flags & CPUFREQ_CONST_LOOPS)
235 return;
236
237 if (!l_p_j_ref_freq) {
238 l_p_j_ref = loops_per_jiffy;
239 l_p_j_ref_freq = ci->old;
2d06d8c4 240 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 241 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 242 }
d08de0c1 243 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 244 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
245 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
246 ci->new);
2d06d8c4 247 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 248 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
249 }
250}
251#else
e08f5f5b
GS
252static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
253{
254 return;
255}
1da177e4
LT
256#endif
257
258
259/**
e4472cb3
DJ
260 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
261 * on frequency transition.
1da177e4 262 *
e4472cb3
DJ
263 * This function calls the transition notifiers and the "adjust_jiffies"
264 * function. It is called twice on all CPU frequency changes that have
32ee8c3e 265 * external effects.
1da177e4
LT
266 */
267void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
268{
e4472cb3
DJ
269 struct cpufreq_policy *policy;
270
1da177e4
LT
271 BUG_ON(irqs_disabled());
272
d5aaffa9
DB
273 if (cpufreq_disabled())
274 return;
275
1da177e4 276 freqs->flags = cpufreq_driver->flags;
2d06d8c4 277 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 278 state, freqs->new);
1da177e4 279
7a6aedfa 280 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
1da177e4 281 switch (state) {
e4472cb3 282
1da177e4 283 case CPUFREQ_PRECHANGE:
32ee8c3e 284 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
285 * which is not equal to what the cpufreq core thinks is
286 * "old frequency".
1da177e4
LT
287 */
288 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
289 if ((policy) && (policy->cpu == freqs->cpu) &&
290 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 291 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
292 " %u, cpufreq assumed %u kHz.\n",
293 freqs->old, policy->cur);
294 freqs->old = policy->cur;
1da177e4
LT
295 }
296 }
b4dfdbb3 297 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 298 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
299 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
300 break;
e4472cb3 301
1da177e4
LT
302 case CPUFREQ_POSTCHANGE:
303 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 304 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723
TR
305 (unsigned long)freqs->cpu);
306 trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
25e41933 307 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 308 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 309 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
310 if (likely(policy) && likely(policy->cpu == freqs->cpu))
311 policy->cur = freqs->new;
1da177e4
LT
312 break;
313 }
1da177e4
LT
314}
315EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
316
317
318
319/*********************************************************************
320 * SYSFS INTERFACE *
321 *********************************************************************/
322
3bcb09a3
JF
323static struct cpufreq_governor *__find_governor(const char *str_governor)
324{
325 struct cpufreq_governor *t;
326
327 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 328 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
329 return t;
330
331 return NULL;
332}
333
1da177e4
LT
334/**
335 * cpufreq_parse_governor - parse a governor string
336 */
905d77cd 337static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
338 struct cpufreq_governor **governor)
339{
3bcb09a3
JF
340 int err = -EINVAL;
341
1da177e4 342 if (!cpufreq_driver)
3bcb09a3
JF
343 goto out;
344
1da177e4
LT
345 if (cpufreq_driver->setpolicy) {
346 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
347 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 348 err = 0;
e08f5f5b
GS
349 } else if (!strnicmp(str_governor, "powersave",
350 CPUFREQ_NAME_LEN)) {
1da177e4 351 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 352 err = 0;
1da177e4 353 }
3bcb09a3 354 } else if (cpufreq_driver->target) {
1da177e4 355 struct cpufreq_governor *t;
3bcb09a3 356
3fc54d37 357 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
358
359 t = __find_governor(str_governor);
360
ea714970 361 if (t == NULL) {
1a8e1463 362 int ret;
ea714970 363
1a8e1463
KC
364 mutex_unlock(&cpufreq_governor_mutex);
365 ret = request_module("cpufreq_%s", str_governor);
366 mutex_lock(&cpufreq_governor_mutex);
ea714970 367
1a8e1463
KC
368 if (ret == 0)
369 t = __find_governor(str_governor);
ea714970
JF
370 }
371
3bcb09a3
JF
372 if (t != NULL) {
373 *governor = t;
374 err = 0;
1da177e4 375 }
3bcb09a3 376
3fc54d37 377 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 378 }
29464f28 379out:
3bcb09a3 380 return err;
1da177e4 381}
1da177e4
LT
382
383
1da177e4 384/**
e08f5f5b
GS
385 * cpufreq_per_cpu_attr_read() / show_##file_name() -
386 * print out cpufreq information
1da177e4
LT
387 *
388 * Write out information from cpufreq_driver->policy[cpu]; object must be
389 * "unsigned int".
390 */
391
32ee8c3e
DJ
392#define show_one(file_name, object) \
393static ssize_t show_##file_name \
905d77cd 394(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 395{ \
29464f28 396 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
397}
398
399show_one(cpuinfo_min_freq, cpuinfo.min_freq);
400show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 401show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
402show_one(scaling_min_freq, min);
403show_one(scaling_max_freq, max);
404show_one(scaling_cur_freq, cur);
405
e08f5f5b
GS
406static int __cpufreq_set_policy(struct cpufreq_policy *data,
407 struct cpufreq_policy *policy);
7970e08b 408
1da177e4
LT
409/**
410 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
411 */
412#define store_one(file_name, object) \
413static ssize_t store_##file_name \
905d77cd 414(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 415{ \
f55c9c26 416 unsigned int ret; \
1da177e4
LT
417 struct cpufreq_policy new_policy; \
418 \
419 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
420 if (ret) \
421 return -EINVAL; \
422 \
29464f28 423 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
424 if (ret != 1) \
425 return -EINVAL; \
426 \
7970e08b
TR
427 ret = __cpufreq_set_policy(policy, &new_policy); \
428 policy->user_policy.object = policy->object; \
1da177e4
LT
429 \
430 return ret ? ret : count; \
431}
432
29464f28
DJ
433store_one(scaling_min_freq, min);
434store_one(scaling_max_freq, max);
1da177e4
LT
435
436/**
437 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
438 */
905d77cd
DJ
439static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
440 char *buf)
1da177e4 441{
5a01f2e8 442 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
443 if (!cur_freq)
444 return sprintf(buf, "<unknown>");
445 return sprintf(buf, "%u\n", cur_freq);
446}
447
448
449/**
450 * show_scaling_governor - show the current policy for the specified CPU
451 */
905d77cd 452static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 453{
29464f28 454 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
455 return sprintf(buf, "powersave\n");
456 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
457 return sprintf(buf, "performance\n");
458 else if (policy->governor)
4b972f0b 459 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 460 policy->governor->name);
1da177e4
LT
461 return -EINVAL;
462}
463
464
465/**
466 * store_scaling_governor - store policy for the specified CPU
467 */
905d77cd
DJ
468static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
469 const char *buf, size_t count)
1da177e4 470{
f55c9c26 471 unsigned int ret;
1da177e4
LT
472 char str_governor[16];
473 struct cpufreq_policy new_policy;
474
475 ret = cpufreq_get_policy(&new_policy, policy->cpu);
476 if (ret)
477 return ret;
478
29464f28 479 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
480 if (ret != 1)
481 return -EINVAL;
482
e08f5f5b
GS
483 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
484 &new_policy.governor))
1da177e4
LT
485 return -EINVAL;
486
7970e08b
TR
487 /* Do not use cpufreq_set_policy here or the user_policy.max
488 will be wrongly overridden */
7970e08b
TR
489 ret = __cpufreq_set_policy(policy, &new_policy);
490
491 policy->user_policy.policy = policy->policy;
492 policy->user_policy.governor = policy->governor;
7970e08b 493
e08f5f5b
GS
494 if (ret)
495 return ret;
496 else
497 return count;
1da177e4
LT
498}
499
500/**
501 * show_scaling_driver - show the cpufreq driver currently loaded
502 */
905d77cd 503static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 504{
4b972f0b 505 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
506}
507
508/**
509 * show_scaling_available_governors - show the available CPUfreq governors
510 */
905d77cd
DJ
511static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
512 char *buf)
1da177e4
LT
513{
514 ssize_t i = 0;
515 struct cpufreq_governor *t;
516
517 if (!cpufreq_driver->target) {
518 i += sprintf(buf, "performance powersave");
519 goto out;
520 }
521
522 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
523 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
524 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 525 goto out;
4b972f0b 526 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 527 }
7d5e350f 528out:
1da177e4
LT
529 i += sprintf(&buf[i], "\n");
530 return i;
531}
e8628dd0 532
835481d9 533static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
534{
535 ssize_t i = 0;
536 unsigned int cpu;
537
835481d9 538 for_each_cpu(cpu, mask) {
1da177e4
LT
539 if (i)
540 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
541 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
542 if (i >= (PAGE_SIZE - 5))
29464f28 543 break;
1da177e4
LT
544 }
545 i += sprintf(&buf[i], "\n");
546 return i;
547}
548
e8628dd0
DW
549/**
550 * show_related_cpus - show the CPUs affected by each transition even if
551 * hw coordination is in use
552 */
553static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
554{
835481d9 555 if (cpumask_empty(policy->related_cpus))
e8628dd0
DW
556 return show_cpus(policy->cpus, buf);
557 return show_cpus(policy->related_cpus, buf);
558}
559
560/**
561 * show_affected_cpus - show the CPUs affected by each transition
562 */
563static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
564{
565 return show_cpus(policy->cpus, buf);
566}
567
9e76988e 568static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 569 const char *buf, size_t count)
9e76988e
VP
570{
571 unsigned int freq = 0;
572 unsigned int ret;
573
879000f9 574 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
575 return -EINVAL;
576
577 ret = sscanf(buf, "%u", &freq);
578 if (ret != 1)
579 return -EINVAL;
580
581 policy->governor->store_setspeed(policy, freq);
582
583 return count;
584}
585
586static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
587{
879000f9 588 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
589 return sprintf(buf, "<unsupported>\n");
590
591 return policy->governor->show_setspeed(policy, buf);
592}
1da177e4 593
e2f74f35 594/**
8bf1ac72 595 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
596 */
597static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
598{
599 unsigned int limit;
600 int ret;
601 if (cpufreq_driver->bios_limit) {
602 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
603 if (!ret)
604 return sprintf(buf, "%u\n", limit);
605 }
606 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
607}
608
6dad2a29
BP
609cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
610cpufreq_freq_attr_ro(cpuinfo_min_freq);
611cpufreq_freq_attr_ro(cpuinfo_max_freq);
612cpufreq_freq_attr_ro(cpuinfo_transition_latency);
613cpufreq_freq_attr_ro(scaling_available_governors);
614cpufreq_freq_attr_ro(scaling_driver);
615cpufreq_freq_attr_ro(scaling_cur_freq);
616cpufreq_freq_attr_ro(bios_limit);
617cpufreq_freq_attr_ro(related_cpus);
618cpufreq_freq_attr_ro(affected_cpus);
619cpufreq_freq_attr_rw(scaling_min_freq);
620cpufreq_freq_attr_rw(scaling_max_freq);
621cpufreq_freq_attr_rw(scaling_governor);
622cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 623
905d77cd 624static struct attribute *default_attrs[] = {
1da177e4
LT
625 &cpuinfo_min_freq.attr,
626 &cpuinfo_max_freq.attr,
ed129784 627 &cpuinfo_transition_latency.attr,
1da177e4
LT
628 &scaling_min_freq.attr,
629 &scaling_max_freq.attr,
630 &affected_cpus.attr,
e8628dd0 631 &related_cpus.attr,
1da177e4
LT
632 &scaling_governor.attr,
633 &scaling_driver.attr,
634 &scaling_available_governors.attr,
9e76988e 635 &scaling_setspeed.attr,
1da177e4
LT
636 NULL
637};
638
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Recover the policy / freq_attr from the embedded kobject/attribute. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 644
29464f28 645static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 646{
905d77cd
DJ
647 struct cpufreq_policy *policy = to_policy(kobj);
648 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 649 ssize_t ret = -EINVAL;
a9144436 650 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 651 if (!policy)
0db4a8a9 652 goto no_policy;
5a01f2e8
VP
653
654 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 655 goto fail;
5a01f2e8 656
e08f5f5b
GS
657 if (fattr->show)
658 ret = fattr->show(policy, buf);
659 else
660 ret = -EIO;
661
5a01f2e8 662 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 663fail:
a9144436 664 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 665no_policy:
1da177e4
LT
666 return ret;
667}
668
905d77cd
DJ
669static ssize_t store(struct kobject *kobj, struct attribute *attr,
670 const char *buf, size_t count)
1da177e4 671{
905d77cd
DJ
672 struct cpufreq_policy *policy = to_policy(kobj);
673 struct freq_attr *fattr = to_attr(attr);
a07530b4 674 ssize_t ret = -EINVAL;
a9144436 675 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 676 if (!policy)
a07530b4 677 goto no_policy;
5a01f2e8
VP
678
679 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 680 goto fail;
5a01f2e8 681
e08f5f5b
GS
682 if (fattr->store)
683 ret = fattr->store(policy, buf, count);
684 else
685 ret = -EIO;
686
5a01f2e8 687 unlock_policy_rwsem_write(policy->cpu);
a07530b4 688fail:
a9144436 689 cpufreq_cpu_put_sysfs(policy);
a07530b4 690no_policy:
1da177e4
LT
691 return ret;
692}
693
905d77cd 694static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 695{
905d77cd 696 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 697 pr_debug("last reference is dropped\n");
1da177e4
LT
698 complete(&policy->kobj_unregister);
699}
700
52cf25d0 701static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
702 .show = show,
703 .store = store,
704};
705
706static struct kobj_type ktype_cpufreq = {
707 .sysfs_ops = &sysfs_ops,
708 .default_attrs = default_attrs,
709 .release = cpufreq_sysfs_release,
710};
711
/*
 * Returns:
 *   Negative: Failure
 *   0:        Success
 *   Positive: When we have a managed CPU and the sysfs got symlinked
 */
static int cpufreq_add_dev_policy(unsigned int cpu,
				  struct cpufreq_policy *policy,
				  struct device *dev)
{
	int ret = 0;
#ifdef CONFIG_SMP
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;

	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;

		if (cpu == j)
			continue;

		/* Check for existing affected CPUs.
		 * They may not be aware of it due to CPU Hotplug.
		 * cpufreq_cpu_put is called when the device is removed
		 * in __cpufreq_remove_dev()
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0) {
				/* Should not go through policy unlock path */
				if (cpufreq_driver->exit)
					cpufreq_driver->exit(policy);
				cpufreq_cpu_put(managed_policy);
				return -EBUSY;
			}

			__cpufreq_governor(managed_policy, CPUFREQ_GOV_STOP);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			__cpufreq_governor(managed_policy, CPUFREQ_GOV_START);
			__cpufreq_governor(managed_policy, CPUFREQ_GOV_LIMITS);

			pr_debug("CPU already managed, adding link\n");
			ret = sysfs_create_link(&dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				cpufreq_cpu_put(managed_policy);
			/*
			 * Success. We only needed to be added to the mask.
			 * Call driver->exit() because only the cpu parent of
			 * the kobj needed to call init().
			 */
			if (cpufreq_driver->exit)
				cpufreq_driver->exit(policy);

			if (!ret)
				return 1;
			else
				return ret;
		}
	}
#endif
	return ret;
}
796
797
19d6f7ec 798/* symlink affected CPUs */
cf3289d0
AC
799static int cpufreq_add_dev_symlink(unsigned int cpu,
800 struct cpufreq_policy *policy)
19d6f7ec
DJ
801{
802 unsigned int j;
803 int ret = 0;
804
805 for_each_cpu(j, policy->cpus) {
806 struct cpufreq_policy *managed_policy;
8a25a2fd 807 struct device *cpu_dev;
19d6f7ec
DJ
808
809 if (j == cpu)
810 continue;
811 if (!cpu_online(j))
812 continue;
813
2d06d8c4 814 pr_debug("CPU %u already managed, adding link\n", j);
19d6f7ec 815 managed_policy = cpufreq_cpu_get(cpu);
8a25a2fd
KS
816 cpu_dev = get_cpu_device(j);
817 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec
DJ
818 "cpufreq");
819 if (ret) {
820 cpufreq_cpu_put(managed_policy);
821 return ret;
822 }
823 }
824 return ret;
825}
826
cf3289d0
AC
827static int cpufreq_add_dev_interface(unsigned int cpu,
828 struct cpufreq_policy *policy,
8a25a2fd 829 struct device *dev)
909a694e 830{
ecf7e461 831 struct cpufreq_policy new_policy;
909a694e
DJ
832 struct freq_attr **drv_attr;
833 unsigned long flags;
834 int ret = 0;
835 unsigned int j;
836
837 /* prepare interface data */
838 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 839 &dev->kobj, "cpufreq");
909a694e
DJ
840 if (ret)
841 return ret;
842
843 /* set up files for this cpu device */
844 drv_attr = cpufreq_driver->attr;
845 while ((drv_attr) && (*drv_attr)) {
846 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
847 if (ret)
848 goto err_out_kobj_put;
849 drv_attr++;
850 }
851 if (cpufreq_driver->get) {
852 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
853 if (ret)
854 goto err_out_kobj_put;
855 }
856 if (cpufreq_driver->target) {
857 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
858 if (ret)
859 goto err_out_kobj_put;
860 }
e2f74f35
TR
861 if (cpufreq_driver->bios_limit) {
862 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
863 if (ret)
864 goto err_out_kobj_put;
865 }
909a694e
DJ
866
867 spin_lock_irqsave(&cpufreq_driver_lock, flags);
868 for_each_cpu(j, policy->cpus) {
bec037aa
JL
869 if (!cpu_online(j))
870 continue;
909a694e 871 per_cpu(cpufreq_cpu_data, j) = policy;
f1625066 872 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
909a694e
DJ
873 }
874 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
875
876 ret = cpufreq_add_dev_symlink(cpu, policy);
ecf7e461
DJ
877 if (ret)
878 goto err_out_kobj_put;
879
880 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
881 /* assure that the starting sequence is run in __cpufreq_set_policy */
882 policy->governor = NULL;
883
884 /* set default policy */
885 ret = __cpufreq_set_policy(policy, &new_policy);
886 policy->user_policy.policy = policy->policy;
887 policy->user_policy.governor = policy->governor;
888
889 if (ret) {
2d06d8c4 890 pr_debug("setting policy failed\n");
ecf7e461
DJ
891 if (cpufreq_driver->exit)
892 cpufreq_driver->exit(policy);
893 }
909a694e
DJ
894 return ret;
895
896err_out_kobj_put:
897 kobject_put(&policy->kobj);
898 wait_for_completion(&policy->kobj_unregister);
899 return ret;
900}
901
1da177e4
LT
902
903/**
904 * cpufreq_add_dev - add a CPU device
905 *
32ee8c3e 906 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
907 *
908 * The Oracle says: try running cpufreq registration/unregistration concurrently
909 * with with cpu hotplugging and all hell will break loose. Tried to clean this
910 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 911 */
8a25a2fd 912static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 913{
8a25a2fd 914 unsigned int cpu = dev->id;
90e41bac 915 int ret = 0, found = 0;
1da177e4 916 struct cpufreq_policy *policy;
1da177e4
LT
917 unsigned long flags;
918 unsigned int j;
90e41bac
PB
919#ifdef CONFIG_HOTPLUG_CPU
920 int sibling;
921#endif
1da177e4 922
c32b6b8e
AR
923 if (cpu_is_offline(cpu))
924 return 0;
925
2d06d8c4 926 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
927
928#ifdef CONFIG_SMP
929 /* check whether a different CPU already registered this
930 * CPU because it is in the same boat. */
931 policy = cpufreq_cpu_get(cpu);
932 if (unlikely(policy)) {
8ff69732 933 cpufreq_cpu_put(policy);
1da177e4
LT
934 return 0;
935 }
936#endif
937
938 if (!try_module_get(cpufreq_driver->owner)) {
939 ret = -EINVAL;
940 goto module_out;
941 }
942
059019a3 943 ret = -ENOMEM;
e98df50c 944 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 945 if (!policy)
1da177e4 946 goto nomem_out;
059019a3
DJ
947
948 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 949 goto err_free_policy;
059019a3
DJ
950
951 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 952 goto err_free_cpumask;
1da177e4
LT
953
954 policy->cpu = cpu;
835481d9 955 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 956
5a01f2e8 957 /* Initially set CPU itself as the policy_cpu */
f1625066 958 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
3f4a782b
MD
959 ret = (lock_policy_rwsem_write(cpu) < 0);
960 WARN_ON(ret);
5a01f2e8 961
1da177e4 962 init_completion(&policy->kobj_unregister);
65f27f38 963 INIT_WORK(&policy->update, handle_update);
1da177e4 964
8122c6ce 965 /* Set governor before ->init, so that driver could check it */
90e41bac
PB
966#ifdef CONFIG_HOTPLUG_CPU
967 for_each_online_cpu(sibling) {
968 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
969 if (cp && cp->governor &&
970 (cpumask_test_cpu(cpu, cp->related_cpus))) {
971 policy->governor = cp->governor;
972 found = 1;
973 break;
974 }
975 }
976#endif
977 if (!found)
978 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1da177e4
LT
979 /* call driver. From then on the cpufreq must be able
980 * to accept all calls to ->verify and ->setpolicy for this CPU
981 */
982 ret = cpufreq_driver->init(policy);
983 if (ret) {
2d06d8c4 984 pr_debug("initialization failed\n");
3f4a782b 985 goto err_unlock_policy;
1da177e4 986 }
643ae6e8
VK
987
988 /*
989 * affected cpus must always be the one, which are online. We aren't
990 * managing offline cpus here.
991 */
992 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
993
187d9f4e
MC
994 policy->user_policy.min = policy->min;
995 policy->user_policy.max = policy->max;
1da177e4 996
a1531acd
TR
997 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
998 CPUFREQ_START, policy);
999
8a25a2fd 1000 ret = cpufreq_add_dev_policy(cpu, policy, dev);
4bfa042c
TR
1001 if (ret) {
1002 if (ret > 0)
1003 /* This is a managed cpu, symlink created,
1004 exit with 0 */
1005 ret = 0;
ecf7e461 1006 goto err_unlock_policy;
4bfa042c 1007 }
1da177e4 1008
8a25a2fd 1009 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
1010 if (ret)
1011 goto err_out_unregister;
8ff69732 1012
dca02613
LW
1013 unlock_policy_rwsem_write(cpu);
1014
038c5b3e 1015 kobject_uevent(&policy->kobj, KOBJ_ADD);
1da177e4 1016 module_put(cpufreq_driver->owner);
2d06d8c4 1017 pr_debug("initialization complete\n");
87c32271 1018
1da177e4
LT
1019 return 0;
1020
1021
1022err_out_unregister:
1023 spin_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 1024 for_each_cpu(j, policy->cpus)
7a6aedfa 1025 per_cpu(cpufreq_cpu_data, j) = NULL;
1da177e4
LT
1026 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1027
c10997f6 1028 kobject_put(&policy->kobj);
1da177e4
LT
1029 wait_for_completion(&policy->kobj_unregister);
1030
3f4a782b 1031err_unlock_policy:
45709118 1032 unlock_policy_rwsem_write(cpu);
cad70a6a 1033 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
1034err_free_cpumask:
1035 free_cpumask_var(policy->cpus);
1036err_free_policy:
1da177e4 1037 kfree(policy);
1da177e4
LT
1038nomem_out:
1039 module_put(cpufreq_driver->owner);
c32b6b8e 1040module_out:
1da177e4
LT
1041 return ret;
1042}
1043
b8eed8af
VK
1044static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1045{
1046 int j;
1047
1048 policy->last_cpu = policy->cpu;
1049 policy->cpu = cpu;
1050
1051 for_each_cpu(j, policy->cpus) {
1052 if (!cpu_online(j))
1053 continue;
1054 per_cpu(cpufreq_policy_cpu, j) = cpu;
1055 }
1056
1057#ifdef CONFIG_CPU_FREQ_TABLE
1058 cpufreq_frequency_table_update_policy_cpu(policy);
1059#endif
1060 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1061 CPUFREQ_UPDATE_POLICY_CPU, policy);
1062}
1da177e4
LT
1063
1064/**
5a01f2e8 1065 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1066 *
1067 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1068 * Caller should already have policy_rwsem in write mode for this CPU.
1069 * This routine frees the rwsem before returning.
1da177e4 1070 */
8a25a2fd 1071static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 1072{
b8eed8af 1073 unsigned int cpu = dev->id, ret, cpus;
1da177e4
LT
1074 unsigned long flags;
1075 struct cpufreq_policy *data;
499bca9b
AW
1076 struct kobject *kobj;
1077 struct completion *cmp;
8a25a2fd 1078 struct device *cpu_dev;
1da177e4 1079
b8eed8af 1080 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4
LT
1081
1082 spin_lock_irqsave(&cpufreq_driver_lock, flags);
7a6aedfa 1083 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
1084
1085 if (!data) {
b8eed8af 1086 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4 1087 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
5a01f2e8 1088 unlock_policy_rwsem_write(cpu);
1da177e4
LT
1089 return -EINVAL;
1090 }
1da177e4 1091
b8eed8af 1092 if (cpufreq_driver->target)
f6a7409c 1093 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
084f3493
TR
1094
1095#ifdef CONFIG_HOTPLUG_CPU
e77b89f1
DM
1096 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1097 CPUFREQ_NAME_LEN);
084f3493
TR
1098#endif
1099
b8eed8af
VK
1100 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1101 cpus = cpumask_weight(data->cpus);
1102 cpumask_clear_cpu(cpu, data->cpus);
1da177e4 1103
b8eed8af
VK
1104 if (unlikely((cpu == data->cpu) && (cpus > 1))) {
1105 /* first sibling now owns the new sysfs dir */
1106 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1107 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1108 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1109 if (ret) {
1110 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1111 cpumask_set_cpu(cpu, data->cpus);
1112 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1113 "cpufreq");
1114 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
499bca9b 1115 unlock_policy_rwsem_write(cpu);
b8eed8af 1116 return -EINVAL;
1da177e4 1117 }
b8eed8af
VK
1118
1119 update_policy_cpu(data, cpu_dev->id);
1120 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1121 __func__, cpu_dev->id, cpu);
1da177e4 1122 }
1da177e4 1123
b8eed8af 1124 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
5a01f2e8 1125
b8eed8af
VK
1126 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1127 cpufreq_cpu_put(data);
499bca9b 1128 unlock_policy_rwsem_write(cpu);
b8eed8af 1129 sysfs_remove_link(&dev->kobj, "cpufreq");
1da177e4 1130
b8eed8af
VK
1131 /* If cpu is last user of policy, free policy */
1132 if (cpus == 1) {
1133 lock_policy_rwsem_write(cpu);
1134 kobj = &data->kobj;
1135 cmp = &data->kobj_unregister;
1136 unlock_policy_rwsem_write(cpu);
1137 kobject_put(kobj);
7d26e2d5 1138
b8eed8af
VK
1139 /* we need to make sure that the underlying kobj is actually
1140 * not referenced anymore by anybody before we proceed with
1141 * unloading.
1142 */
1143 pr_debug("waiting for dropping of refcount\n");
1144 wait_for_completion(cmp);
1145 pr_debug("wait complete\n");
27ecddc2 1146
27ecddc2 1147 lock_policy_rwsem_write(cpu);
b8eed8af
VK
1148 if (cpufreq_driver->exit)
1149 cpufreq_driver->exit(data);
1150 unlock_policy_rwsem_write(cpu);
27ecddc2 1151
b8eed8af
VK
1152 free_cpumask_var(data->related_cpus);
1153 free_cpumask_var(data->cpus);
1154 kfree(data);
1155 } else if (cpufreq_driver->target) {
1156 __cpufreq_governor(data, CPUFREQ_GOV_START);
1157 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1158 }
1da177e4 1159
1da177e4
LT
1160 return 0;
1161}
1162
1163
8a25a2fd 1164static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1165{
8a25a2fd 1166 unsigned int cpu = dev->id;
5a01f2e8 1167 int retval;
ec28297a
VP
1168
1169 if (cpu_is_offline(cpu))
1170 return 0;
1171
5a01f2e8
VP
1172 if (unlikely(lock_policy_rwsem_write(cpu)))
1173 BUG();
1174
8a25a2fd 1175 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1176 return retval;
1177}
1178
1179
65f27f38 1180static void handle_update(struct work_struct *work)
1da177e4 1181{
65f27f38
DH
1182 struct cpufreq_policy *policy =
1183 container_of(work, struct cpufreq_policy, update);
1184 unsigned int cpu = policy->cpu;
2d06d8c4 1185 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1186 cpufreq_update_policy(cpu);
1187}
1188
1189/**
1190 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1191 * @cpu: cpu number
1192 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1193 * @new_freq: CPU frequency the CPU actually runs at
1194 *
29464f28
DJ
1195 * We adjust to current frequency first, and need to clean up later.
1196 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1197 */
e08f5f5b
GS
1198static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1199 unsigned int new_freq)
1da177e4
LT
1200{
1201 struct cpufreq_freqs freqs;
1202
2d06d8c4 1203 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1204 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1205
1206 freqs.cpu = cpu;
1207 freqs.old = old_freq;
1208 freqs.new = new_freq;
1209 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1210 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1211}
1212
1213
32ee8c3e 1214/**
4ab70df4 1215 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1216 * @cpu: CPU number
1217 *
1218 * This is the last known freq, without actually getting it from the driver.
1219 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1220 */
1221unsigned int cpufreq_quick_get(unsigned int cpu)
1222{
1223 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1224 unsigned int ret_freq = 0;
95235ca2
VP
1225
1226 if (policy) {
e08f5f5b 1227 ret_freq = policy->cur;
95235ca2
VP
1228 cpufreq_cpu_put(policy);
1229 }
1230
4d34a67d 1231 return ret_freq;
95235ca2
VP
1232}
1233EXPORT_SYMBOL(cpufreq_quick_get);
1234
3d737108
JB
1235/**
1236 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1237 * @cpu: CPU number
1238 *
1239 * Just return the max possible frequency for a given CPU.
1240 */
1241unsigned int cpufreq_quick_get_max(unsigned int cpu)
1242{
1243 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1244 unsigned int ret_freq = 0;
1245
1246 if (policy) {
1247 ret_freq = policy->max;
1248 cpufreq_cpu_put(policy);
1249 }
1250
1251 return ret_freq;
1252}
1253EXPORT_SYMBOL(cpufreq_quick_get_max);
1254
95235ca2 1255
5a01f2e8 1256static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1257{
7a6aedfa 1258 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1259 unsigned int ret_freq = 0;
1da177e4 1260
1da177e4 1261 if (!cpufreq_driver->get)
4d34a67d 1262 return ret_freq;
1da177e4 1263
e08f5f5b 1264 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1265
e08f5f5b
GS
1266 if (ret_freq && policy->cur &&
1267 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1268 /* verify no discrepancy between actual and
1269 saved value exists */
1270 if (unlikely(ret_freq != policy->cur)) {
1271 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1272 schedule_work(&policy->update);
1273 }
1274 }
1275
4d34a67d 1276 return ret_freq;
5a01f2e8 1277}
1da177e4 1278
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		return 0;

	/* Only query the driver if the read lock could be taken. */
	if (likely(!lock_policy_rwsem_read(cpu))) {
		freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1306
8a25a2fd
KS
1307static struct subsys_interface cpufreq_interface = {
1308 .name = "cpufreq",
1309 .subsys = &cpu_subsys,
1310 .add_dev = cpufreq_add_dev,
1311 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1312};
1313
1da177e4 1314
42d4dc3f 1315/**
e00e56df
RW
1316 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1317 *
1318 * This function is only executed for the boot processor. The other CPUs
1319 * have been put offline by means of CPU hotplug.
42d4dc3f 1320 */
e00e56df 1321static int cpufreq_bp_suspend(void)
42d4dc3f 1322{
e08f5f5b 1323 int ret = 0;
4bc5d341 1324
e00e56df 1325 int cpu = smp_processor_id();
42d4dc3f
BH
1326 struct cpufreq_policy *cpu_policy;
1327
2d06d8c4 1328 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1329
e00e56df 1330 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1331 cpu_policy = cpufreq_cpu_get(cpu);
1332 if (!cpu_policy)
e00e56df 1333 return 0;
42d4dc3f
BH
1334
1335 if (cpufreq_driver->suspend) {
7ca64e2d 1336 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1337 if (ret)
42d4dc3f
BH
1338 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1339 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1340 }
1341
42d4dc3f 1342 cpufreq_cpu_put(cpu_policy);
c9060494 1343 return ret;
42d4dc3f
BH
1344}
1345
1da177e4 1346/**
e00e56df 1347 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1348 *
1349 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1350 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1351 * restored. It will verify that the current freq is in sync with
1352 * what we believe it to be. This is a bit later than when it
1353 * should be, but nonethteless it's better than calling
1354 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1355 *
1356 * This function is only executed for the boot CPU. The other CPUs have not
1357 * been turned on yet.
1da177e4 1358 */
e00e56df 1359static void cpufreq_bp_resume(void)
1da177e4 1360{
e08f5f5b 1361 int ret = 0;
4bc5d341 1362
e00e56df 1363 int cpu = smp_processor_id();
1da177e4
LT
1364 struct cpufreq_policy *cpu_policy;
1365
2d06d8c4 1366 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1367
e00e56df 1368 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1369 cpu_policy = cpufreq_cpu_get(cpu);
1370 if (!cpu_policy)
e00e56df 1371 return;
1da177e4
LT
1372
1373 if (cpufreq_driver->resume) {
1374 ret = cpufreq_driver->resume(cpu_policy);
1375 if (ret) {
1376 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1377 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1378 goto fail;
1da177e4
LT
1379 }
1380 }
1381
1da177e4 1382 schedule_work(&cpu_policy->update);
ce6c3997 1383
c9060494 1384fail:
1da177e4 1385 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1386}
1387
e00e56df
RW
1388static struct syscore_ops cpufreq_syscore_ops = {
1389 .suspend = cpufreq_bp_suspend,
1390 .resume = cpufreq_bp_resume,
1da177e4
LT
1391};
1392
9d95046e
BP
1393/**
1394 * cpufreq_get_current_driver - return current driver's name
1395 *
1396 * Return the name string of the currently loaded cpufreq driver
1397 * or NULL, if none.
1398 */
1399const char *cpufreq_get_current_driver(void)
1400{
1401 if (cpufreq_driver)
1402 return cpufreq_driver->name;
1403
1404 return NULL;
1405}
1406EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1407
1408/*********************************************************************
1409 * NOTIFIER LISTS INTERFACE *
1410 *********************************************************************/
1411
1412/**
1413 * cpufreq_register_notifier - register a driver with cpufreq
1414 * @nb: notifier function to register
1415 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1416 *
32ee8c3e 1417 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1418 * are notified about clock rate changes (once before and once after
1419 * the transition), or a list of drivers that are notified about
1420 * changes in cpufreq policy.
1421 *
1422 * This function may sleep, and has the same return conditions as
e041c683 1423 * blocking_notifier_chain_register.
1da177e4
LT
1424 */
1425int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1426{
1427 int ret;
1428
d5aaffa9
DB
1429 if (cpufreq_disabled())
1430 return -EINVAL;
1431
74212ca4
CEB
1432 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1433
1da177e4
LT
1434 switch (list) {
1435 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1436 ret = srcu_notifier_chain_register(
e041c683 1437 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1438 break;
1439 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1440 ret = blocking_notifier_chain_register(
1441 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1442 break;
1443 default:
1444 ret = -EINVAL;
1445 }
1da177e4
LT
1446
1447 return ret;
1448}
1449EXPORT_SYMBOL(cpufreq_register_notifier);
1450
1451
1452/**
1453 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1454 * @nb: notifier block to be unregistered
1455 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1456 *
1457 * Remove a driver from the CPU frequency notifier list.
1458 *
1459 * This function may sleep, and has the same return conditions as
e041c683 1460 * blocking_notifier_chain_unregister.
1da177e4
LT
1461 */
1462int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1463{
1464 int ret;
1465
d5aaffa9
DB
1466 if (cpufreq_disabled())
1467 return -EINVAL;
1468
1da177e4
LT
1469 switch (list) {
1470 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1471 ret = srcu_notifier_chain_unregister(
e041c683 1472 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1473 break;
1474 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1475 ret = blocking_notifier_chain_unregister(
1476 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1477 break;
1478 default:
1479 ret = -EINVAL;
1480 }
1da177e4
LT
1481
1482 return ret;
1483}
1484EXPORT_SYMBOL(cpufreq_unregister_notifier);
1485
1486
1487/*********************************************************************
1488 * GOVERNORS *
1489 *********************************************************************/
1490
1491
1492int __cpufreq_driver_target(struct cpufreq_policy *policy,
1493 unsigned int target_freq,
1494 unsigned int relation)
1495{
1496 int retval = -EINVAL;
7249924e 1497 unsigned int old_target_freq = target_freq;
c32b6b8e 1498
a7b422cd
KRW
1499 if (cpufreq_disabled())
1500 return -ENODEV;
1501
7249924e
VK
1502 /* Make sure that target_freq is within supported range */
1503 if (target_freq > policy->max)
1504 target_freq = policy->max;
1505 if (target_freq < policy->min)
1506 target_freq = policy->min;
1507
1508 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1509 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1510
1511 if (target_freq == policy->cur)
1512 return 0;
1513
1da177e4
LT
1514 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1515 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1516
1da177e4
LT
1517 return retval;
1518}
1519EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1520
1da177e4
LT
1521int cpufreq_driver_target(struct cpufreq_policy *policy,
1522 unsigned int target_freq,
1523 unsigned int relation)
1524{
f1829e4a 1525 int ret = -EINVAL;
1da177e4
LT
1526
1527 policy = cpufreq_cpu_get(policy->cpu);
1528 if (!policy)
f1829e4a 1529 goto no_policy;
1da177e4 1530
5a01f2e8 1531 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1532 goto fail;
1da177e4
LT
1533
1534 ret = __cpufreq_driver_target(policy, target_freq, relation);
1535
5a01f2e8 1536 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1537
f1829e4a 1538fail:
1da177e4 1539 cpufreq_cpu_put(policy);
f1829e4a 1540no_policy:
1da177e4
LT
1541 return ret;
1542}
1543EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1544
bf0b90e3 1545int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1546{
1547 int ret = 0;
1548
d5aaffa9
DB
1549 if (cpufreq_disabled())
1550 return ret;
1551
0676f7f2
VK
1552 if (!(cpu_online(cpu) && cpufreq_driver->getavg))
1553 return 0;
1554
dfde5d62
VP
1555 policy = cpufreq_cpu_get(policy->cpu);
1556 if (!policy)
1557 return -EINVAL;
1558
0676f7f2 1559 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1560
dfde5d62
VP
1561 cpufreq_cpu_put(policy);
1562 return ret;
1563}
5a01f2e8 1564EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1565
153d7f3f 1566/*
153d7f3f
AV
1567 * when "event" is CPUFREQ_GOV_LIMITS
1568 */
1da177e4 1569
e08f5f5b
GS
1570static int __cpufreq_governor(struct cpufreq_policy *policy,
1571 unsigned int event)
1da177e4 1572{
cc993cab 1573 int ret;
6afde10c
TR
1574
1575 /* Only must be defined when default governor is known to have latency
1576 restrictions, like e.g. conservative or ondemand.
1577 That this is the case is already ensured in Kconfig
1578 */
1579#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1580 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1581#else
1582 struct cpufreq_governor *gov = NULL;
1583#endif
1c256245
TR
1584
1585 if (policy->governor->max_transition_latency &&
1586 policy->cpuinfo.transition_latency >
1587 policy->governor->max_transition_latency) {
6afde10c
TR
1588 if (!gov)
1589 return -EINVAL;
1590 else {
1591 printk(KERN_WARNING "%s governor failed, too long"
1592 " transition latency of HW, fallback"
1593 " to %s governor\n",
1594 policy->governor->name,
1595 gov->name);
1596 policy->governor = gov;
1597 }
1c256245 1598 }
1da177e4
LT
1599
1600 if (!try_module_get(policy->governor->owner))
1601 return -EINVAL;
1602
2d06d8c4 1603 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1604 policy->cpu, event);
1da177e4
LT
1605 ret = policy->governor->governor(policy, event);
1606
e08f5f5b
GS
1607 /* we keep one module reference alive for
1608 each CPU governed by this CPU */
1da177e4
LT
1609 if ((event != CPUFREQ_GOV_START) || ret)
1610 module_put(policy->governor->owner);
1611 if ((event == CPUFREQ_GOV_STOP) && !ret)
1612 module_put(policy->governor->owner);
1613
1614 return ret;
1615}
1616
1617
1da177e4
LT
1618int cpufreq_register_governor(struct cpufreq_governor *governor)
1619{
3bcb09a3 1620 int err;
1da177e4
LT
1621
1622 if (!governor)
1623 return -EINVAL;
1624
a7b422cd
KRW
1625 if (cpufreq_disabled())
1626 return -ENODEV;
1627
3fc54d37 1628 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1629
3bcb09a3
JF
1630 err = -EBUSY;
1631 if (__find_governor(governor->name) == NULL) {
1632 err = 0;
1633 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1634 }
1da177e4 1635
32ee8c3e 1636 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1637 return err;
1da177e4
LT
1638}
1639EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1640
1641
1642void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1643{
90e41bac
PB
1644#ifdef CONFIG_HOTPLUG_CPU
1645 int cpu;
1646#endif
1647
1da177e4
LT
1648 if (!governor)
1649 return;
1650
a7b422cd
KRW
1651 if (cpufreq_disabled())
1652 return;
1653
90e41bac
PB
1654#ifdef CONFIG_HOTPLUG_CPU
1655 for_each_present_cpu(cpu) {
1656 if (cpu_online(cpu))
1657 continue;
1658 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1659 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1660 }
1661#endif
1662
3fc54d37 1663 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1664 list_del(&governor->governor_list);
3fc54d37 1665 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1666 return;
1667}
1668EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1669
1670
1671
1672/*********************************************************************
1673 * POLICY INTERFACE *
1674 *********************************************************************/
1675
1676/**
1677 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1678 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1679 * is written
1da177e4
LT
1680 *
1681 * Reads the current cpufreq policy.
1682 */
1683int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1684{
1685 struct cpufreq_policy *cpu_policy;
1686 if (!policy)
1687 return -EINVAL;
1688
1689 cpu_policy = cpufreq_cpu_get(cpu);
1690 if (!cpu_policy)
1691 return -EINVAL;
1692
1da177e4 1693 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1694
1695 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1696 return 0;
1697}
1698EXPORT_SYMBOL(cpufreq_get_policy);
1699
1700
153d7f3f 1701/*
e08f5f5b
GS
1702 * data : current policy.
1703 * policy : policy to be set.
153d7f3f 1704 */
e08f5f5b
GS
1705static int __cpufreq_set_policy(struct cpufreq_policy *data,
1706 struct cpufreq_policy *policy)
1da177e4
LT
1707{
1708 int ret = 0;
1709
2d06d8c4 1710 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1da177e4
LT
1711 policy->min, policy->max);
1712
e08f5f5b
GS
1713 memcpy(&policy->cpuinfo, &data->cpuinfo,
1714 sizeof(struct cpufreq_cpuinfo));
1da177e4 1715
53391fa2 1716 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1717 ret = -EINVAL;
1718 goto error_out;
1719 }
1720
1da177e4
LT
1721 /* verify the cpu speed can be set within this limit */
1722 ret = cpufreq_driver->verify(policy);
1723 if (ret)
1724 goto error_out;
1725
1da177e4 1726 /* adjust if necessary - all reasons */
e041c683
AS
1727 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1728 CPUFREQ_ADJUST, policy);
1da177e4
LT
1729
1730 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1731 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1732 CPUFREQ_INCOMPATIBLE, policy);
1da177e4
LT
1733
1734 /* verify the cpu speed can be set within this limit,
1735 which might be different to the first one */
1736 ret = cpufreq_driver->verify(policy);
e041c683 1737 if (ret)
1da177e4 1738 goto error_out;
1da177e4
LT
1739
1740 /* notification of the new policy */
e041c683
AS
1741 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1742 CPUFREQ_NOTIFY, policy);
1da177e4 1743
7d5e350f
DJ
1744 data->min = policy->min;
1745 data->max = policy->max;
1da177e4 1746
2d06d8c4 1747 pr_debug("new min and max freqs are %u - %u kHz\n",
e08f5f5b 1748 data->min, data->max);
1da177e4
LT
1749
1750 if (cpufreq_driver->setpolicy) {
1751 data->policy = policy->policy;
2d06d8c4 1752 pr_debug("setting range\n");
1da177e4
LT
1753 ret = cpufreq_driver->setpolicy(policy);
1754 } else {
1755 if (policy->governor != data->governor) {
1756 /* save old, working values */
1757 struct cpufreq_governor *old_gov = data->governor;
1758
2d06d8c4 1759 pr_debug("governor switch\n");
1da177e4
LT
1760
1761 /* end old governor */
ffe6275f 1762 if (data->governor)
1da177e4
LT
1763 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1764
1765 /* start new governor */
1766 data->governor = policy->governor;
1767 if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1768 /* new governor failed, so re-start old one */
2d06d8c4 1769 pr_debug("starting governor %s failed\n",
e08f5f5b 1770 data->governor->name);
1da177e4
LT
1771 if (old_gov) {
1772 data->governor = old_gov;
e08f5f5b
GS
1773 __cpufreq_governor(data,
1774 CPUFREQ_GOV_START);
1da177e4
LT
1775 }
1776 ret = -EINVAL;
1777 goto error_out;
1778 }
1779 /* might be a policy change, too, so fall through */
1780 }
2d06d8c4 1781 pr_debug("governor: change or update limits\n");
1da177e4
LT
1782 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1783 }
1784
7d5e350f 1785error_out:
1da177e4
LT
1786 return ret;
1787}
1788
1da177e4
LT
1789/**
1790 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1791 * @cpu: CPU which shall be re-evaluated
1792 *
25985edc 1793 * Useful for policy notifiers which have different necessities
1da177e4
LT
1794 * at different times.
1795 */
1796int cpufreq_update_policy(unsigned int cpu)
1797{
1798 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1799 struct cpufreq_policy policy;
f1829e4a 1800 int ret;
1da177e4 1801
f1829e4a
JL
1802 if (!data) {
1803 ret = -ENODEV;
1804 goto no_policy;
1805 }
1da177e4 1806
f1829e4a
JL
1807 if (unlikely(lock_policy_rwsem_write(cpu))) {
1808 ret = -EINVAL;
1809 goto fail;
1810 }
1da177e4 1811
2d06d8c4 1812 pr_debug("updating policy for CPU %u\n", cpu);
7d5e350f 1813 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1814 policy.min = data->user_policy.min;
1815 policy.max = data->user_policy.max;
1816 policy.policy = data->user_policy.policy;
1817 policy.governor = data->user_policy.governor;
1818
0961dd0d
TR
1819 /* BIOS might change freq behind our back
1820 -> ask driver for current freq and notify governors about a change */
1821 if (cpufreq_driver->get) {
1822 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3 1823 if (!data->cur) {
2d06d8c4 1824 pr_debug("Driver did not initialize current freq");
a85f7bd3
TR
1825 data->cur = policy.cur;
1826 } else {
1827 if (data->cur != policy.cur)
e08f5f5b
GS
1828 cpufreq_out_of_sync(cpu, data->cur,
1829 policy.cur);
a85f7bd3 1830 }
0961dd0d
TR
1831 }
1832
1da177e4
LT
1833 ret = __cpufreq_set_policy(data, &policy);
1834
5a01f2e8
VP
1835 unlock_policy_rwsem_write(cpu);
1836
f1829e4a 1837fail:
1da177e4 1838 cpufreq_cpu_put(data);
f1829e4a 1839no_policy:
1da177e4
LT
1840 return ret;
1841}
1842EXPORT_SYMBOL(cpufreq_update_policy);
1843
dd184a01 1844static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1845 unsigned long action, void *hcpu)
1846{
1847 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1848 struct device *dev;
c32b6b8e 1849
8a25a2fd
KS
1850 dev = get_cpu_device(cpu);
1851 if (dev) {
c32b6b8e
AR
1852 switch (action) {
1853 case CPU_ONLINE:
8bb78442 1854 case CPU_ONLINE_FROZEN:
8a25a2fd 1855 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1856 break;
1857 case CPU_DOWN_PREPARE:
8bb78442 1858 case CPU_DOWN_PREPARE_FROZEN:
5a01f2e8
VP
1859 if (unlikely(lock_policy_rwsem_write(cpu)))
1860 BUG();
1861
8a25a2fd 1862 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1863 break;
5a01f2e8 1864 case CPU_DOWN_FAILED:
8bb78442 1865 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1866 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1867 break;
1868 }
1869 }
1870 return NOTIFY_OK;
1871}
1872
9c36f746 1873static struct notifier_block __refdata cpufreq_cpu_notifier = {
c32b6b8e
AR
1874 .notifier_call = cpufreq_cpu_callback,
1875};
1da177e4
LT
1876
1877/*********************************************************************
1878 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1879 *********************************************************************/
1880
1881/**
1882 * cpufreq_register_driver - register a CPU Frequency driver
1883 * @driver_data: A struct cpufreq_driver containing the values#
1884 * submitted by the CPU Frequency driver.
1885 *
32ee8c3e 1886 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1887 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1888 * (and isn't unregistered in the meantime).
1da177e4
LT
1889 *
1890 */
221dee28 1891int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
1892{
1893 unsigned long flags;
1894 int ret;
1895
a7b422cd
KRW
1896 if (cpufreq_disabled())
1897 return -ENODEV;
1898
1da177e4
LT
1899 if (!driver_data || !driver_data->verify || !driver_data->init ||
1900 ((!driver_data->setpolicy) && (!driver_data->target)))
1901 return -EINVAL;
1902
2d06d8c4 1903 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
1904
1905 if (driver_data->setpolicy)
1906 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1907
1908 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1909 if (cpufreq_driver) {
1910 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1911 return -EBUSY;
1912 }
1913 cpufreq_driver = driver_data;
1914 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1915
8a25a2fd 1916 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
1917 if (ret)
1918 goto err_null_driver;
1da177e4 1919
8f5bc2ab 1920 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
1921 int i;
1922 ret = -ENODEV;
1923
1924 /* check for at least one working CPU */
7a6aedfa
MT
1925 for (i = 0; i < nr_cpu_ids; i++)
1926 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 1927 ret = 0;
7a6aedfa
MT
1928 break;
1929 }
1da177e4
LT
1930
1931 /* if all ->init() calls failed, unregister */
1932 if (ret) {
2d06d8c4 1933 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 1934 driver_data->name);
8a25a2fd 1935 goto err_if_unreg;
1da177e4
LT
1936 }
1937 }
1938
8f5bc2ab 1939 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 1940 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 1941
8f5bc2ab 1942 return 0;
8a25a2fd
KS
1943err_if_unreg:
1944 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab
JS
1945err_null_driver:
1946 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1947 cpufreq_driver = NULL;
1948 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 1949 return ret;
1da177e4
LT
1950}
1951EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1952
1953
1954/**
1955 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1956 *
32ee8c3e 1957 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
1958 * the right to do so, i.e. if you have succeeded in initialising before!
1959 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1960 * currently not initialised.
1961 */
221dee28 1962int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
1963{
1964 unsigned long flags;
1965
2d06d8c4 1966 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 1967 return -EINVAL;
1da177e4 1968
2d06d8c4 1969 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 1970
8a25a2fd 1971 subsys_interface_unregister(&cpufreq_interface);
65edc68c 1972 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4
LT
1973
1974 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1975 cpufreq_driver = NULL;
1976 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1977
1978 return 0;
1979}
1980EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1981
1982static int __init cpufreq_core_init(void)
1983{
1984 int cpu;
1985
a7b422cd
KRW
1986 if (cpufreq_disabled())
1987 return -ENODEV;
1988
5a01f2e8 1989 for_each_possible_cpu(cpu) {
f1625066 1990 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
1991 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1992 }
8aa84ad8 1993
8a25a2fd 1994 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 1995 BUG_ON(!cpufreq_global_kobject);
e00e56df 1996 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 1997
5a01f2e8
VP
1998 return 0;
1999}
5a01f2e8 2000core_initcall(cpufreq_core_init);