/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
        return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
        return active == !policy_is_inactive(policy);
}

/* Finds the next active/inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
                                          bool active)
{
        do {
                /* No more policies in the list */
                if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
                        return NULL;

                policy = list_next_entry(policy, policy_list);
        } while (!suitable_policy(policy, active));

        return policy;
}

static struct cpufreq_policy *first_policy(bool active)
{
        struct cpufreq_policy *policy;

        /* No policies in the list */
        if (list_empty(&cpufreq_policy_list))
                return NULL;

        policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
                                  policy_list);

        if (!suitable_policy(policy, active))
                policy = next_policy(policy, active);

        return policy;
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)    \
        for (__policy = first_policy(__active);         \
             __policy;                                  \
             __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)                \
        for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)              \
        for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)                       \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                           \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its read-write lock.
 * This lock also protects the per-CPU cpufreq_cpu_data pointers.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);

/**
 * cpufreq_set_update_util_data - Populate the CPU's update_util_data pointer.
 * @cpu: The CPU to set the pointer for.
 * @data: New pointer value.
 *
 * Set and publish the update_util_data pointer for the given CPU.  That pointer
 * points to a struct update_util_data object containing a callback function
 * to call from cpufreq_update_util().  That function will be called from an RCU
 * read-side critical section, so it must not sleep.
 *
 * Callers must use RCU-sched callbacks to free any memory that might be
 * accessed via the old update_util_data pointer or invoke synchronize_sched()
 * right after this function to avoid use-after-free.
 */
void cpufreq_set_update_util_data(int cpu, struct update_util_data *data)
{
        if (WARN_ON(data && !data->func))
                return;

        rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), data);
}
EXPORT_SYMBOL_GPL(cpufreq_set_update_util_data);
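
/*
 * Illustrative tear-down sequence (hypothetical caller), following the rule
 * in the kernel-doc above: clear the pointer first, then wait for in-flight
 * callbacks to finish before freeing anything they may still dereference.
 *
 *      cpufreq_set_update_util_data(cpu, NULL);
 *      synchronize_sched();
 *      kfree(ud);      // 'ud' is a hypothetical container holding the
 *                      // struct update_util_data that was registered
 */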

/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @time: Current time.
 * @util: Current utilization.
 * @max: Utilization ceiling.
 *
 * This function is called by the scheduler on every invocation of
 * update_load_avg() on the CPU whose utilization is being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 */
void cpufreq_update_util(u64 time, unsigned long util, unsigned long max)
{
        struct update_util_data *data;

#ifdef CONFIG_LOCKDEP
        WARN_ON(debug_locks && !rcu_read_lock_sched_held());
#endif

        data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
        /*
         * If this isn't inside of an RCU-sched read-side critical section, data
         * may become NULL after the check below.
         */
        if (data)
                data->func(data, time, util, max);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * Each notifier head provides its own locking for its list.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && !policy_is_inactive(policy) ?
                policy->freq_table : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
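
/*
 * Sketch of how a sampling governor typically consumes get_cpu_idle_time()
 * (the prev_* bookkeeping is hypothetical; io_busy selects whether iowait
 * time counts as busy time):
 *
 *      u64 wall, idle;
 *      unsigned int load;
 *
 *      idle = get_cpu_idle_time(cpu, &wall, io_busy);
 *      load = 100 * ((wall - prev_wall) - (idle - prev_idle)) /
 *             (wall - prev_wall);
 *      prev_wall = wall;
 *      prev_idle = idle;
 */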

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the frequency table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
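
/*
 * Minimal driver sketch wiring up the two generic helpers above; the foo_*
 * names, frequency table and 100000 ns latency are placeholders, and
 * policy->clk must be valid for cpufreq_generic_get() to work:
 *
 *      static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *      {
 *              policy->clk = foo_clk;
 *              return cpufreq_generic_init(policy, foo_freq_table, 100000);
 *      }
 *
 *      static struct cpufreq_driver foo_driver = {
 *              .init   = foo_cpufreq_init,
 *              .get    = cpufreq_generic_get,
 *              ...
 *      };
 */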

/**
 * cpufreq_cpu_get: returns the policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find the policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark the policy busy,
 * so a corresponding call to cpufreq_cpu_put() is required to decrement it
 * again. Until that call is made, the policy won't be freed, as freeing
 * depends on the kobject reference count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = cpufreq_cpu_get_raw(cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
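
/*
 * Typical reference pattern (illustrative): every successful
 * cpufreq_cpu_get() must be balanced by a cpufreq_cpu_put(), or the
 * policy's kobject can never be released.
 *
 *      struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *      if (policy) {
 *              use(policy);    // hypothetical consumer of policy->cur etc.
 *              cpufreq_cpu_put(policy);
 *      }
 */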

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /*
                 * Detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                         freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when the transition may have failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{

        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (unlikely(WARN_ON(!policy->transition_ongoing)))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
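
/*
 * Sketch of the expected pairing in a driver's frequency-change path
 * (foo_set_rate() is hypothetical). Drivers implementing ->target_index
 * without CPUFREQ_ASYNC_NOTIFICATION get this wrapping from the core and
 * must not call these helpers themselves:
 *
 *      struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *      int ret;
 *
 *      cpufreq_freq_transition_begin(policy, &freqs);
 *      ret = foo_set_rate(target);
 *      cpufreq_freq_transition_end(policy, &freqs, ret);
 */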


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);
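
/*
 * With the default global kobject this attribute appears as
 * /sys/devices/system/cpu/cpufreq/boost when the driver declares boost
 * support. Illustrative use from userspace:
 *
 *      # cat /sys/devices/system/cpu/cpufreq/boost
 *      0
 *      # echo 1 > /sys/devices/system/cpu/cpufreq/boost
 */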

static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strncasecmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
        return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
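
/*
 * For reference, show_one(scaling_min_freq, min) above expands to
 * (whitespace aside):
 *
 *      static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *                                           char *buf)
 *      {
 *              return sprintf(buf, "%u\n", policy->min);
 *      }
 */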

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        memcpy(&new_policy, policy, sizeof(*policy));                   \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        temp = new_policy.object;                                       \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        if (!ret)                                                       \
                policy->user_policy.object = temp;                      \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);

        if (!cur_freq)
                return sprintf(buf, "<unknown>\n");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);
        return ret ? ret : count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;

        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        down_read(&policy->rwsem);

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        up_read(&policy->rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (!cpu_online(policy->cpu))
                goto unlock;

        down_write(&policy->rwsem);

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        up_write(&policy->rwsem);
unlock:
        put_online_cpus();

        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);

        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

        if (!policy)
                return 0;

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return 0;

        return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
        struct device *cpu_dev;

        pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

        cpu_dev = get_cpu_device(cpu);
        if (WARN_ON(!cpu_dev))
                return;

        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu(j, policy->real_cpus) {
                ret = add_cpu_dev_symlink(policy, j);
                if (ret)
                        break;
        }

        return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;

        /* Some related CPUs might not be present (physically hotplugged) */
        for_each_cpu(j, policy->real_cpus)
                remove_cpu_dev_symlink(policy, j);
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return cpufreq_add_dev_symlink(policy);
}

__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
        return NULL;
}
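
/*
 * A built-in governor overrides the __weak stub above to make itself the
 * fallback; e.g. a governor selected as the default would provide (sketch,
 * assuming a hypothetical cpufreq_gov_foo instance):
 *
 *      #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_FOO
 *      struct cpufreq_governor *cpufreq_default_governor(void)
 *      {
 *              return &cpufreq_gov_foo;
 *      }
 *      #endif
 */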

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        /* Update governor of new_policy to the governor used before hotplug */
        gov = find_governor(policy->last_governor);
        if (gov) {
                pr_debug("Restoring governor %s for cpu %d\n",
                                gov->name, policy->cpu);
        } else {
                gov = cpufreq_default_governor();
                if (!gov)
                        return -ENODATA;
        }

        new_policy.governor = gov;

        /* Use the default policy if there is no last_policy. */
        if (cpufreq_driver->setpolicy) {
                if (policy->last_policy)
                        new_policy.policy = policy->last_policy;
                else
                        cpufreq_parse_governor(gov->name, &new_policy.policy,
                                               NULL);
        }
        /* set default policy */
        return cpufreq_set_policy(policy, &new_policy);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int ret = 0;

        /* Has this CPU been taken care of already? */
        if (cpumask_test_cpu(cpu, policy->cpus))
                return 0;

        down_write(&policy->rwsem);
        if (has_target()) {
                ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        goto unlock;
                }
        }

        cpumask_set_cpu(cpu, policy->cpus);

        if (has_target()) {
                ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret)
                        pr_err("%s: Failed to start governor\n", __func__);
        }

unlock:
        up_write(&policy->rwsem);
        return ret;
}

static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;

        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);
        struct cpufreq_policy *policy;

        if (WARN_ON(!dev))
                return NULL;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                goto err_free_rcpumask;

        kobject_init(&policy->kobj, &ktype_cpufreq);
        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        policy->cpu = cpu;
        return policy;

err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
        struct kobject *kobj;
        struct completion *cmp;

        if (notify)
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);

        down_write(&policy->rwsem);
        cpufreq_remove_dev_symlink(policy);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_write(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
{
        unsigned long flags;
        int cpu;

        /* Remove policy from list */
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_del(&policy->policy_list);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_policy_put_kobj(policy, notify);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        bool new_policy;
        unsigned long flags;
        unsigned int j;
        int ret;

        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                if (!policy_is_inactive(policy))
                        return cpufreq_add_policy_cpu(policy, cpu);

                /* This is the only online CPU for the policy.  Start over. */
                new_policy = false;
                down_write(&policy->rwsem);
                policy->cpu = cpu;
                policy->governor = NULL;
                up_write(&policy->rwsem);
        } else {
                new_policy = true;
                policy = cpufreq_policy_alloc(cpu);
                if (!policy)
                        return -ENOMEM;
        }

        cpumask_copy(policy->cpus, cpumask_of(cpu));

        /*
         * Call the driver. From then on, the cpufreq driver must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU.
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto out_free_policy;
        }

        down_write(&policy->rwsem);

        if (new_policy) {
                /* related_cpus should at least include policy->cpus. */
                cpumask_copy(policy->related_cpus, policy->cpus);
                /* Remember CPUs present at the policy creation time. */
                cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);

                /* Name and add the kobject */
                ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
                                  "policy%u",
                                  cpumask_first(policy->related_cpus));
                if (ret) {
                        pr_err("%s: failed to add policy->kobj: %d\n", __func__,
                               ret);
                        goto out_exit_policy;
                }
        }

        /*
         * The affected CPUs must always be the ones that are online. We
         * aren't managing offline CPUs here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (new_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                for_each_cpu(j, policy->related_cpus)
                        per_cpu(cpufreq_cpu_data, j) = policy;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto out_exit_policy;
                }
        }

        /*
         * Sometimes boot loaders set the CPU frequency to a value outside of
         * the frequency table present with the cpufreq core. In such cases
         * the CPU might be unstable if it has to run at that frequency for a
         * long duration, so it is better to set it to a frequency which is
         * specified in the frequency table. This also keeps cpufreq stats
         * consistent, as cpufreq-stats would otherwise fail to register
         * because the current frequency of the CPU isn't found in the table.
         *
         * Because we don't want this change to affect the boot process badly,
         * we go for the next frequency which is >= policy->cur ('cur' must be
         * set by now, otherwise we would end up setting the frequency to the
         * lowest entry of the table, as 'cur' is initialized to zero).
         *
         * We pass the target frequency as "policy->cur - 1", because
         * __cpufreq_driver_target() would otherwise simply fail, as
         * policy->cur would be equal to the target frequency.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at unknown frequency ? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here a few seconds after boot does not mean
                         * that the system will remain stable at the "unknown"
                         * frequency for a longer duration. Hence, a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

        if (new_policy) {
                ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_exit_policy;
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
                       __func__, cpu, ret);
                /* cpufreq_policy_free() will notify based on this */
                new_policy = false;
                goto out_exit_policy;
        }

        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);

        pr_debug("initialization complete\n");

        return 0;

out_exit_policy:
        up_write(&policy->rwsem);

        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
out_free_policy:
        cpufreq_policy_free(policy, !new_policy);
        return ret;
}

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned cpu = dev->id;
        int ret;

        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

        if (cpu_online(cpu)) {
                ret = cpufreq_online(cpu);
        } else {
                /*
                 * A hotplug notifier will follow and we will handle it as CPU
                 * online then.  For now, just create the sysfs link, unless
                 * there is no policy or the link is already present.
                 */
                struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

                ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
                        ? add_cpu_dev_symlink(policy, cpu) : 0;
        }

        return ret;
}

static void cpufreq_offline(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        int ret;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return;
        }

        down_write(&policy->rwsem);
        if (has_target()) {
                ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret)
                        pr_err("%s: Failed to stop governor\n", __func__);
        }

        cpumask_clear_cpu(cpu, policy->cpus);

        if (policy_is_inactive(policy)) {
                if (has_target())
                        strncpy(policy->last_governor, policy->governor->name,
                                CPUFREQ_NAME_LEN);
                else
                        policy->last_policy = policy->policy;
        } else if (cpu == policy->cpu) {
                /* Nominate new CPU */
                policy->cpu = cpumask_any(policy->cpus);
        }

        /* Start governor again for active policy */
        if (!policy_is_inactive(policy)) {
                if (has_target()) {
                        ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
                        if (!ret)
                                ret = cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                        if (ret)
                                pr_err("%s: Failed to start governor\n", __func__);
                }

                goto unlock;
        }

        if (cpufreq_driver->stop_cpu)
                cpufreq_driver->stop_cpu(policy);

        /* If cpu is last user of policy, free policy */
        if (has_target()) {
                ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
        }

        /*
         * Perform the ->exit() even during light-weight tear-down,
         * since this is a core component, and is essential for the
         * subsequent light-weight ->init() to succeed.
         */
        if (cpufreq_driver->exit) {
                cpufreq_driver->exit(policy);
                policy->freq_table = NULL;
        }

unlock:
        up_write(&policy->rwsem);
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy)
                return;

        if (cpu_online(cpu))
                cpufreq_offline(cpu);

        cpumask_clear_cpu(cpu, policy->real_cpus);
        remove_cpu_dev_symlink(policy, cpu);

        if (cpumask_empty(policy->real_cpus))
                cpufreq_policy_free(policy, true);
}

/**
 *      cpufreq_out_of_sync - If the actual and saved CPU frequencies differ,
 *      we're in deep trouble.
 *      @policy: policy managing CPUs
 *      @new_freq: CPU frequency the CPU actually runs at
 *
 *      We adjust to the current frequency first, and need to clean up later.
 *      So either call cpufreq_update_policy() or schedule handle_update().
 */
1481 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1482                                 unsigned int new_freq)
1483 {
1484         struct cpufreq_freqs freqs;
1485
1486         pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1487                  policy->cur, new_freq);
1488
1489         freqs.old = policy->cur;
1490         freqs.new = new_freq;
1491
1492         cpufreq_freq_transition_begin(policy, &freqs);
1493         cpufreq_freq_transition_end(policy, &freqs, 0);
1494 }
1495
1496 /**
1497  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1498  * @cpu: CPU number
1499  *
1500  * This is the last known freq, without actually getting it from the driver.
1501  * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1502  */
1503 unsigned int cpufreq_quick_get(unsigned int cpu)
1504 {
1505         struct cpufreq_policy *policy;
1506         unsigned int ret_freq = 0;
1507
1508         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1509                 return cpufreq_driver->get(cpu);
1510
1511         policy = cpufreq_cpu_get(cpu);
1512         if (policy) {
1513                 ret_freq = policy->cur;
1514                 cpufreq_cpu_put(policy);
1515         }
1516
1517         return ret_freq;
1518 }
1519 EXPORT_SYMBOL(cpufreq_quick_get);
1520
1521 /**
1522  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1523  * @cpu: CPU number
1524  *
1525  * Just return the max possible frequency for a given CPU.
1526  */
1527 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1528 {
1529         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1530         unsigned int ret_freq = 0;
1531
1532         if (policy) {
1533                 ret_freq = policy->max;
1534                 cpufreq_cpu_put(policy);
1535         }
1536
1537         return ret_freq;
1538 }
1539 EXPORT_SYMBOL(cpufreq_quick_get_max);
1540
1541 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1542 {
1543         unsigned int ret_freq = 0;
1544
1545         if (!cpufreq_driver->get)
1546                 return ret_freq;
1547
1548         ret_freq = cpufreq_driver->get(policy->cpu);
1549
1550         /* Updating inactive policies is invalid, so avoid doing that. */
1551         if (unlikely(policy_is_inactive(policy)))
1552                 return ret_freq;
1553
1554         if (ret_freq && policy->cur &&
1555                 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1556                 /* Verify that no discrepancy exists between the actual
1557                  * and the saved value. */
1558                 if (unlikely(ret_freq != policy->cur)) {
1559                         cpufreq_out_of_sync(policy, ret_freq);
1560                         schedule_work(&policy->update);
1561                 }
1562         }
1563
1564         return ret_freq;
1565 }
1566
1567 /**
1568  * cpufreq_get - get the current CPU frequency (in kHz)
1569  * @cpu: CPU number
1570  *
1571  * Get the current frequency of the CPU, as reported by the hardware driver.
1572  */
1573 unsigned int cpufreq_get(unsigned int cpu)
1574 {
1575         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1576         unsigned int ret_freq = 0;
1577
1578         if (policy) {
1579                 down_read(&policy->rwsem);
1580                 ret_freq = __cpufreq_get(policy);
1581                 up_read(&policy->rwsem);
1582
1583                 cpufreq_cpu_put(policy);
1584         }
1585
1586         return ret_freq;
1587 }
1588 EXPORT_SYMBOL(cpufreq_get);
1589
1590 static struct subsys_interface cpufreq_interface = {
1591         .name           = "cpufreq",
1592         .subsys         = &cpu_subsys,
1593         .add_dev        = cpufreq_add_dev,
1594         .remove_dev     = cpufreq_remove_dev,
1595 };
1596
1597 /*
1598  * In case the platform wants some specific frequency to be configured
1599  * during suspend.
1600  */
1601 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1602 {
1603         int ret;
1604
1605         if (!policy->suspend_freq) {
1606                 pr_debug("%s: suspend_freq not defined\n", __func__);
1607                 return 0;
1608         }
1609
1610         pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1611                         policy->suspend_freq);
1612
1613         ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1614                         CPUFREQ_RELATION_H);
1615         if (ret)
1616                 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1617                                 __func__, policy->suspend_freq, ret);
1618
1619         return ret;
1620 }
1621 EXPORT_SYMBOL(cpufreq_generic_suspend);
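
/*
 * Illustrative sketch: a platform driver opting in to the generic suspend
 * handler above. It picks a safe frequency in its ->init() callback and
 * points .suspend at cpufreq_generic_suspend; the example_* names are
 * hypothetical.
 */
static int example_driver_init(struct cpufreq_policy *policy)
{
        /* ... frequency table and per-policy setup elided ... */

        /* Frequency (kHz) to settle at before the platform suspends. */
        policy->suspend_freq = policy->cpuinfo.max_freq;
        return 0;
}

static struct cpufreq_driver example_suspend_driver = {
        .init           = example_driver_init,
        .suspend        = cpufreq_generic_suspend,
        /* .verify, .target_index etc. elided */
};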
1622
1623 /**
1624  * cpufreq_suspend() - Suspend CPUFreq governors
1625  *
1626  * Called during system-wide suspend/hibernate cycles to suspend governors,
1627  * as some platforms can't change frequency after this point in the suspend
1628  * cycle. Some of the devices used for changing the frequency (e.g. i2c
1629  * controllers, regulators) are themselves suspended soon after this point.
1630  */
1631 void cpufreq_suspend(void)
1632 {
1633         struct cpufreq_policy *policy;
1634         int ret;
1635
1636         if (!cpufreq_driver)
1637                 return;
1638
1639         if (!has_target())
1640                 goto suspend;
1641
1642         pr_debug("%s: Suspending Governors\n", __func__);
1643
1644         for_each_active_policy(policy) {
1645                 down_write(&policy->rwsem);
1646                 ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1647                 up_write(&policy->rwsem);
1648
1649                 if (ret)
1650                         pr_err("%s: Failed to stop governor for policy: %p\n",
1651                                 __func__, policy);
1652                 else if (cpufreq_driver->suspend
1653                     && cpufreq_driver->suspend(policy))
1654                         pr_err("%s: Failed to suspend driver: %p\n", __func__,
1655                                 policy);
1656         }
1657
1658 suspend:
1659         cpufreq_suspended = true;
1660 }
1661
1662 /**
1663  * cpufreq_resume() - Resume CPUFreq governors
1664  *
1665  * Called during system-wide suspend/hibernate cycles to resume governors
1666  * that were suspended with cpufreq_suspend().
1667  */
1668 void cpufreq_resume(void)
1669 {
1670         struct cpufreq_policy *policy;
1671         int ret;
1672
1673         if (!cpufreq_driver)
1674                 return;
1675
1676         cpufreq_suspended = false;
1677
1678         if (!has_target())
1679                 return;
1680
1681         pr_debug("%s: Resuming Governors\n", __func__);
1682
1683         for_each_active_policy(policy) {
1684                 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1685                         pr_err("%s: Failed to resume driver: %p\n", __func__,
1686                                 policy);
1687                 } else {
1688                         down_write(&policy->rwsem);
1689                         ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
1690                         if (!ret)
1691                                 cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1692                         up_write(&policy->rwsem);
1693
1694                         if (ret)
1695                                 pr_err("%s: Failed to start governor for policy: %p\n",
1696                                        __func__, policy);
1697                 }
1698         }
1699
1700         /*
1701          * Schedule a call to cpufreq_update_policy() for the first online
1702          * CPU, as that one is never hotplugged out on suspend. It will
1703          * verify that the current freq is in sync with what we believe it to be.
1704          */
1705         policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1706         if (WARN_ON(!policy))
1707                 return;
1708
1709         schedule_work(&policy->update);
1710 }
1711
1712 /**
1713  *      cpufreq_get_current_driver - return current driver's name
1714  *
1715  *      Return the name string of the currently loaded cpufreq driver
1716  *      or NULL, if none.
1717  */
1718 const char *cpufreq_get_current_driver(void)
1719 {
1720         if (cpufreq_driver)
1721                 return cpufreq_driver->name;
1722
1723         return NULL;
1724 }
1725 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1726
1727 /**
1728  *      cpufreq_get_driver_data - return current driver data
1729  *
1730  *      Return the private data of the currently loaded cpufreq
1731  *      driver, or NULL if no cpufreq driver is loaded.
1732  */
1733 void *cpufreq_get_driver_data(void)
1734 {
1735         if (cpufreq_driver)
1736                 return cpufreq_driver->driver_data;
1737
1738         return NULL;
1739 }
1740 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1741
1742 /*********************************************************************
1743  *                     NOTIFIER LISTS INTERFACE                      *
1744  *********************************************************************/
1745
1746 /**
1747  *      cpufreq_register_notifier - register a driver with cpufreq
1748  *      @nb: notifier function to register
1749  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1750  *
1751  *      Add a driver to one of two lists: either a list of drivers that
1752  *      are notified about clock rate changes (once before and once after
1753  *      the transition), or a list of drivers that are notified about
1754  *      changes in cpufreq policy.
1755  *
1756  *      This function may sleep, and has the same return conditions as
1757  *      blocking_notifier_chain_register.
1758  */
1759 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1760 {
1761         int ret;
1762
1763         if (cpufreq_disabled())
1764                 return -EINVAL;
1765
1766         WARN_ON(!init_cpufreq_transition_notifier_list_called);
1767
1768         switch (list) {
1769         case CPUFREQ_TRANSITION_NOTIFIER:
1770                 ret = srcu_notifier_chain_register(
1771                                 &cpufreq_transition_notifier_list, nb);
1772                 break;
1773         case CPUFREQ_POLICY_NOTIFIER:
1774                 ret = blocking_notifier_chain_register(
1775                                 &cpufreq_policy_notifier_list, nb);
1776                 break;
1777         default:
1778                 ret = -EINVAL;
1779         }
1780
1781         return ret;
1782 }
1783 EXPORT_SYMBOL(cpufreq_register_notifier);
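
/*
 * Illustrative sketch: a client registering for transition events. The
 * callback is invoked once with CPUFREQ_PRECHANGE before and once with
 * CPUFREQ_POSTCHANGE after every frequency switch. All example_* names
 * are hypothetical.
 */
static int example_transition_cb(struct notifier_block *nb,
                                 unsigned long val, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (val == CPUFREQ_POSTCHANGE)
                pr_debug("CPU%u switched: %u -> %u kHz\n",
                         freqs->cpu, freqs->old, freqs->new);

        return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
        .notifier_call = example_transition_cb,
};

/*
 * Paired at probe/remove time:
 *      cpufreq_register_notifier(&example_transition_nb,
 *                                CPUFREQ_TRANSITION_NOTIFIER);
 *      cpufreq_unregister_notifier(&example_transition_nb,
 *                                  CPUFREQ_TRANSITION_NOTIFIER);
 */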
1784
1785 /**
1786  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1787  *      @nb: notifier block to be unregistered
1788  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1789  *
1790  *      Remove a driver from the CPU frequency notifier list.
1791  *
1792  *      This function may sleep, and has the same return conditions as
1793  *      blocking_notifier_chain_unregister.
1794  */
1795 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1796 {
1797         int ret;
1798
1799         if (cpufreq_disabled())
1800                 return -EINVAL;
1801
1802         switch (list) {
1803         case CPUFREQ_TRANSITION_NOTIFIER:
1804                 ret = srcu_notifier_chain_unregister(
1805                                 &cpufreq_transition_notifier_list, nb);
1806                 break;
1807         case CPUFREQ_POLICY_NOTIFIER:
1808                 ret = blocking_notifier_chain_unregister(
1809                                 &cpufreq_policy_notifier_list, nb);
1810                 break;
1811         default:
1812                 ret = -EINVAL;
1813         }
1814
1815         return ret;
1816 }
1817 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1818
1819
1820 /*********************************************************************
1821  *                              GOVERNORS                            *
1822  *********************************************************************/
1823
1824 /* Must set freqs->new to intermediate frequency */
1825 static int __target_intermediate(struct cpufreq_policy *policy,
1826                                  struct cpufreq_freqs *freqs, int index)
1827 {
1828         int ret;
1829
1830         freqs->new = cpufreq_driver->get_intermediate(policy, index);
1831
1832         /* We don't need to switch to intermediate freq */
1833         if (!freqs->new)
1834                 return 0;
1835
1836         pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1837                  __func__, policy->cpu, freqs->old, freqs->new);
1838
1839         cpufreq_freq_transition_begin(policy, freqs);
1840         ret = cpufreq_driver->target_intermediate(policy, index);
1841         cpufreq_freq_transition_end(policy, freqs, ret);
1842
1843         if (ret)
1844                 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1845                        __func__, ret);
1846
1847         return ret;
1848 }
1849
1850 static int __target_index(struct cpufreq_policy *policy,
1851                           struct cpufreq_frequency_table *freq_table, int index)
1852 {
1853         struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1854         unsigned int intermediate_freq = 0;
1855         int retval = -EINVAL;
1856         bool notify;
1857
1858         notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1859         if (notify) {
1860                 /* Handle switching to intermediate frequency */
1861                 if (cpufreq_driver->get_intermediate) {
1862                         retval = __target_intermediate(policy, &freqs, index);
1863                         if (retval)
1864                                 return retval;
1865
1866                         intermediate_freq = freqs.new;
1867                         /* Set old freq to intermediate */
1868                         if (intermediate_freq)
1869                                 freqs.old = freqs.new;
1870                 }
1871
1872                 freqs.new = freq_table[index].frequency;
1873                 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1874                          __func__, policy->cpu, freqs.old, freqs.new);
1875
1876                 cpufreq_freq_transition_begin(policy, &freqs);
1877         }
1878
1879         retval = cpufreq_driver->target_index(policy, index);
1880         if (retval)
1881                 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1882                        retval);
1883
1884         if (notify) {
1885                 cpufreq_freq_transition_end(policy, &freqs, retval);
1886
1887                 /*
1888                  * Failed after setting to intermediate freq? Driver should have
1889                  * reverted back to initial frequency and so should we. Check
1890                  * here for intermediate_freq instead of get_intermediate, in
1891                  * case we haven't switched to intermediate freq at all.
1892                  */
1893                 if (unlikely(retval && intermediate_freq)) {
1894                         freqs.old = intermediate_freq;
1895                         freqs.new = policy->restore_freq;
1896                         cpufreq_freq_transition_begin(policy, &freqs);
1897                         cpufreq_freq_transition_end(policy, &freqs, 0);
1898                 }
1899         }
1900
1901         return retval;
1902 }
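
/*
 * Illustrative sketch: the driver-side pair consumed by the logic above.
 * ->get_intermediate() returns a stable "bypass" frequency to switch to
 * first (or 0 to skip the extra step), and ->target_intermediate()
 * performs that switch. All example_* names and values are hypothetical.
 */
#define EXAMPLE_BYPASS_KHZ      800000

static unsigned int example_get_intermediate(struct cpufreq_policy *policy,
                                             unsigned int index)
{
        /* Already running on the bypass clock: no intermediate step. */
        if (policy->cur == EXAMPLE_BYPASS_KHZ)
                return 0;

        return EXAMPLE_BYPASS_KHZ;
}

static int example_target_intermediate(struct cpufreq_policy *policy,
                                       unsigned int index)
{
        /* Re-parent the CPU clock to the fixed bypass source here. */
        return 0;
}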
1903
1904 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1905                             unsigned int target_freq,
1906                             unsigned int relation)
1907 {
1908         unsigned int old_target_freq = target_freq;
1909         int retval = -EINVAL;
1910
1911         if (cpufreq_disabled())
1912                 return -ENODEV;
1913
1914         /* Make sure that target_freq is within supported range */
1915         if (target_freq > policy->max)
1916                 target_freq = policy->max;
1917         if (target_freq < policy->min)
1918                 target_freq = policy->min;
1919
1920         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1921                  policy->cpu, target_freq, relation, old_target_freq);
1922
1923         /*
1924          * This might look like a redundant call, as we are checking it again
1925          * after finding the index. But it is left here intentionally so that
1926          * when exactly the same frequency is requested again, we can save a
1927          * few function calls.
1928          */
1929         if (target_freq == policy->cur)
1930                 return 0;
1931
1932         /* Save last value to restore later on errors */
1933         policy->restore_freq = policy->cur;
1934
1935         if (cpufreq_driver->target)
1936                 retval = cpufreq_driver->target(policy, target_freq, relation);
1937         else if (cpufreq_driver->target_index) {
1938                 struct cpufreq_frequency_table *freq_table;
1939                 int index;
1940
1941                 freq_table = cpufreq_frequency_get_table(policy->cpu);
1942                 if (unlikely(!freq_table)) {
1943                         pr_err("%s: Unable to find freq_table\n", __func__);
1944                         goto out;
1945                 }
1946
1947                 retval = cpufreq_frequency_table_target(policy, freq_table,
1948                                 target_freq, relation, &index);
1949                 if (unlikely(retval)) {
1950                         pr_err("%s: Unable to find matching freq\n", __func__);
1951                         goto out;
1952                 }
1953
1954                 if (freq_table[index].frequency == policy->cur) {
1955                         retval = 0;
1956                         goto out;
1957                 }
1958
1959                 retval = __target_index(policy, freq_table, index);
1960         }
1961
1962 out:
1963         return retval;
1964 }
1965 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1966
1967 int cpufreq_driver_target(struct cpufreq_policy *policy,
1968                           unsigned int target_freq,
1969                           unsigned int relation)
1970 {
1971         int ret = -EINVAL;
1972
1973         down_write(&policy->rwsem);
1974
1975         ret = __cpufreq_driver_target(policy, target_freq, relation);
1976
1977         up_write(&policy->rwsem);
1978
1979         return ret;
1980 }
1981 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
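
/*
 * Illustrative sketch: how a governor typically requests a frequency
 * change from process context. CPUFREQ_RELATION_L selects the lowest
 * frequency at or above the target, CPUFREQ_RELATION_H the highest at or
 * below it. The function name and load heuristic are hypothetical.
 */
static void example_scale_for_load(struct cpufreq_policy *policy,
                                   unsigned int load_pct)
{
        unsigned int target = policy->min +
                load_pct * (policy->max - policy->min) / 100;

        /* Takes policy->rwsem itself; must not be called with it held. */
        cpufreq_driver_target(policy, target, CPUFREQ_RELATION_L);
}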
1982
1983 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
1984 {
1985         return NULL;
1986 }
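
/*
 * Illustrative sketch: a built-in governor can override the weak symbol
 * above so that policies whose chosen governor can't cope with the
 * hardware's transition latency still get a working one. With
 * example_gov standing in for a real, registered governor, an override
 * elsewhere would look like:
 *
 *      struct cpufreq_governor *cpufreq_fallback_governor(void)
 *      {
 *              return &example_gov;
 *      }
 */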
1987
1988 static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
1989 {
1990         int ret;
1991
1992         /* Don't start any governor operations if we are entering suspend */
1993         if (cpufreq_suspended)
1994                 return 0;
1995         /*
1996          * The governor might not be initialized here if an ACPI _PPC change
1997          * notification happened, so check for it.
1998          */
1999         if (!policy->governor)
2000                 return -EINVAL;
2001
2002         if (policy->governor->max_transition_latency &&
2003             policy->cpuinfo.transition_latency >
2004             policy->governor->max_transition_latency) {
2005                 struct cpufreq_governor *gov = cpufreq_fallback_governor();
2006
2007                 if (gov) {
2008                         pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2009                                 policy->governor->name, gov->name);
2010                         policy->governor = gov;
2011                 } else {
2012                         return -EINVAL;
2013                 }
2014         }
2015
2016         if (event == CPUFREQ_GOV_POLICY_INIT)
2017                 if (!try_module_get(policy->governor->owner))
2018                         return -EINVAL;
2019
2020         pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
2021
2022         ret = policy->governor->governor(policy, event);
2023
2024         if (!ret) {
2025                 if (event == CPUFREQ_GOV_POLICY_INIT)
2026                         policy->governor->initialized++;
2027                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2028                         policy->governor->initialized--;
2029         }
2030
2031         if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2032                         ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2033                 module_put(policy->governor->owner);
2034
2035         return ret;
2036 }
2037
2038 int cpufreq_register_governor(struct cpufreq_governor *governor)
2039 {
2040         int err;
2041
2042         if (!governor)
2043                 return -EINVAL;
2044
2045         if (cpufreq_disabled())
2046                 return -ENODEV;
2047
2048         mutex_lock(&cpufreq_governor_mutex);
2049
2050         governor->initialized = 0;
2051         err = -EBUSY;
2052         if (!find_governor(governor->name)) {
2053                 err = 0;
2054                 list_add(&governor->governor_list, &cpufreq_governor_list);
2055         }
2056
2057         mutex_unlock(&cpufreq_governor_mutex);
2058         return err;
2059 }
2060 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
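
/*
 * Illustrative sketch: the registration half of a minimal governor
 * module. The event dispatch is elided; all example_* names are
 * hypothetical.
 */
static int example_gov_fn(struct cpufreq_policy *policy, unsigned int event)
{
        /* Handle CPUFREQ_GOV_POLICY_INIT/EXIT, _START, _STOP, _LIMITS. */
        return 0;
}

static struct cpufreq_governor example_gov = {
        .name           = "example",
        .governor       = example_gov_fn,
        .owner          = THIS_MODULE,
};

/* From the module's init: cpufreq_register_governor(&example_gov); */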
2061
2062 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2063 {
2064         struct cpufreq_policy *policy;
2065         unsigned long flags;
2066
2067         if (!governor)
2068                 return;
2069
2070         if (cpufreq_disabled())
2071                 return;
2072
2073         /* clear last_governor for all inactive policies */
2074         read_lock_irqsave(&cpufreq_driver_lock, flags);
2075         for_each_inactive_policy(policy) {
2076                 if (!strcmp(policy->last_governor, governor->name)) {
2077                         policy->governor = NULL;
2078                         strcpy(policy->last_governor, "\0");
2079                 }
2080         }
2081         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2082
2083         mutex_lock(&cpufreq_governor_mutex);
2084         list_del(&governor->governor_list);
2085         mutex_unlock(&cpufreq_governor_mutex);
2086         return;
2087 }
2088 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2089
2090
2091 /*********************************************************************
2092  *                          POLICY INTERFACE                         *
2093  *********************************************************************/
2094
2095 /**
2096  * cpufreq_get_policy - get the current cpufreq_policy
2097  * @policy: struct cpufreq_policy into which the current policy is written
2098  * @cpu: CPU whose current policy shall be read
2099  *
2100  * Reads the current cpufreq policy.
2101  */
2102 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2103 {
2104         struct cpufreq_policy *cpu_policy;
2105         if (!policy)
2106                 return -EINVAL;
2107
2108         cpu_policy = cpufreq_cpu_get(cpu);
2109         if (!cpu_policy)
2110                 return -EINVAL;
2111
2112         memcpy(policy, cpu_policy, sizeof(*policy));
2113
2114         cpufreq_cpu_put(cpu_policy);
2115         return 0;
2116 }
2117 EXPORT_SYMBOL(cpufreq_get_policy);
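
/*
 * Illustrative sketch: taking a consistent on-stack snapshot of a CPU's
 * policy, as thermal or platform code might do before acting on the
 * limits. The function name is hypothetical.
 */
static unsigned int example_policy_max_khz(unsigned int cpu)
{
        struct cpufreq_policy snapshot;

        if (cpufreq_get_policy(&snapshot, cpu))
                return 0;       /* no policy for this CPU */

        return snapshot.max;
}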
2118
2119 /*
2120  * policy: current policy.
2121  * new_policy: policy to be set.
2122  */
2123 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2124                                 struct cpufreq_policy *new_policy)
2125 {
2126         struct cpufreq_governor *old_gov;
2127         int ret;
2128
2129         pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2130                  new_policy->cpu, new_policy->min, new_policy->max);
2131
2132         memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2133
2134         /*
2135          * This check works well when we store new min/max freq attributes,
2136          * because new_policy is a copy of policy with one field updated.
2137          */
2138         if (new_policy->min > new_policy->max)
2139                 return -EINVAL;
2140
2141         /* verify the cpu speed can be set within this limit */
2142         ret = cpufreq_driver->verify(new_policy);
2143         if (ret)
2144                 return ret;
2145
2146         /* adjust if necessary - all reasons */
2147         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2148                         CPUFREQ_ADJUST, new_policy);
2149
2150         /*
2151          * verify the cpu speed can be set within this limit, which might be
2152          * different to the first one
2153          */
2154         ret = cpufreq_driver->verify(new_policy);
2155         if (ret)
2156                 return ret;
2157
2158         /* notification of the new policy */
2159         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2160                         CPUFREQ_NOTIFY, new_policy);
2161
2162         policy->min = new_policy->min;
2163         policy->max = new_policy->max;
2164
2165         pr_debug("new min and max freqs are %u - %u kHz\n",
2166                  policy->min, policy->max);
2167
2168         if (cpufreq_driver->setpolicy) {
2169                 policy->policy = new_policy->policy;
2170                 pr_debug("setting range\n");
2171                 return cpufreq_driver->setpolicy(new_policy);
2172         }
2173
2174         if (new_policy->governor == policy->governor)
2175                 goto out;
2176
2177         pr_debug("governor switch\n");
2178
2179         /* save old, working values */
2180         old_gov = policy->governor;
2181         /* end old governor */
2182         if (old_gov) {
2183                 ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2184                 if (ret) {
2185                         /* This can happen due to race with other operations */
2186                         pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2187                                  __func__, old_gov->name, ret);
2188                         return ret;
2189                 }
2190
2191                 ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2192                 if (ret) {
2193                         pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2194                                __func__, old_gov->name, ret);
2195                         return ret;
2196                 }
2197         }
2198
2199         /* start new governor */
2200         policy->governor = new_policy->governor;
2201         ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2202         if (!ret) {
2203                 ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
2204                 if (!ret)
2205                         goto out;
2206
2207                 cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2208         }
2209
2210         /* new governor failed, so re-start old one */
2211         pr_debug("starting governor %s failed\n", policy->governor->name);
2212         if (old_gov) {
2213                 policy->governor = old_gov;
2214                 if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2215                         policy->governor = NULL;
2216                 else
2217                         cpufreq_governor(policy, CPUFREQ_GOV_START);
2218         }
2219
2220         return ret;
2221
2222  out:
2223         pr_debug("governor: change or update limits\n");
2224         return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2225 }
2226
2227 /**
2228  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
2229  *      @cpu: CPU which shall be re-evaluated
2230  *
2231  *      Useful for policy notifiers whose requirements differ
2232  *      at different times.
2233  */
2234 int cpufreq_update_policy(unsigned int cpu)
2235 {
2236         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2237         struct cpufreq_policy new_policy;
2238         int ret;
2239
2240         if (!policy)
2241                 return -ENODEV;
2242
2243         down_write(&policy->rwsem);
2244
2245         pr_debug("updating policy for CPU %u\n", cpu);
2246         memcpy(&new_policy, policy, sizeof(*policy));
2247         new_policy.min = policy->user_policy.min;
2248         new_policy.max = policy->user_policy.max;
2249
2250         /*
2251          * The BIOS might have changed the frequency behind our back, so ask
2252          * the driver for the current one and notify the governors of a change.
2253          */
2254         if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2255                 new_policy.cur = cpufreq_driver->get(cpu);
2256                 if (WARN_ON(!new_policy.cur)) {
2257                         ret = -EIO;
2258                         goto unlock;
2259                 }
2260
2261                 if (!policy->cur) {
2262                         pr_debug("Driver did not initialize current freq\n");
2263                         policy->cur = new_policy.cur;
2264                 } else {
2265                         if (policy->cur != new_policy.cur && has_target())
2266                                 cpufreq_out_of_sync(policy, new_policy.cur);
2267                 }
2268         }
2269
2270         ret = cpufreq_set_policy(policy, &new_policy);
2271
2272 unlock:
2273         up_write(&policy->rwsem);
2274
2275         cpufreq_cpu_put(policy);
2276         return ret;
2277 }
2278 EXPORT_SYMBOL(cpufreq_update_policy);
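
/*
 * Illustrative sketch: platform code reacting to firmware changing the
 * allowed limits behind cpufreq's back, e.g. after an ACPI _PPC change
 * notification. The wrapper name is hypothetical.
 */
static void example_limits_changed(unsigned int cpu)
{
        /* Re-runs the policy notifiers and re-evaluates limits and freq. */
        if (cpufreq_update_policy(cpu))
                pr_debug("CPU%u has no policy, nothing to update\n", cpu);
}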
2279
2280 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2281                                         unsigned long action, void *hcpu)
2282 {
2283         unsigned int cpu = (unsigned long)hcpu;
2284
2285         switch (action & ~CPU_TASKS_FROZEN) {
2286         case CPU_ONLINE:
2287                 cpufreq_online(cpu);
2288                 break;
2289
2290         case CPU_DOWN_PREPARE:
2291                 cpufreq_offline(cpu);
2292                 break;
2293
2294         case CPU_DOWN_FAILED:
2295                 cpufreq_online(cpu);
2296                 break;
2297         }
2298         return NOTIFY_OK;
2299 }
2300
2301 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2302         .notifier_call = cpufreq_cpu_callback,
2303 };
2304
2305 /*********************************************************************
2306  *               BOOST                                               *
2307  *********************************************************************/
2308 static int cpufreq_boost_set_sw(int state)
2309 {
2310         struct cpufreq_frequency_table *freq_table;
2311         struct cpufreq_policy *policy;
2312         int ret = -EINVAL;
2313
2314         for_each_active_policy(policy) {
2315                 freq_table = cpufreq_frequency_get_table(policy->cpu);
2316                 if (freq_table) {
2317                         ret = cpufreq_frequency_table_cpuinfo(policy,
2318                                                         freq_table);
2319                         if (ret) {
2320                                 pr_err("%s: Policy frequency update failed\n",
2321                                        __func__);
2322                                 break;
2323                         }
2324
2325                         down_write(&policy->rwsem);
2326                         policy->user_policy.max = policy->max;
2327                         cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2328                         up_write(&policy->rwsem);
2329                 }
2330         }
2331
2332         return ret;
2333 }
2334
2335 int cpufreq_boost_trigger_state(int state)
2336 {
2337         unsigned long flags;
2338         int ret = 0;
2339
2340         if (cpufreq_driver->boost_enabled == state)
2341                 return 0;
2342
2343         write_lock_irqsave(&cpufreq_driver_lock, flags);
2344         cpufreq_driver->boost_enabled = state;
2345         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2346
2347         ret = cpufreq_driver->set_boost(state);
2348         if (ret) {
2349                 write_lock_irqsave(&cpufreq_driver_lock, flags);
2350                 cpufreq_driver->boost_enabled = !state;
2351                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2352
2353                 pr_err("%s: Cannot %s BOOST\n",
2354                        __func__, state ? "enable" : "disable");
2355         }
2356
2357         return ret;
2358 }
2359
2360 static bool cpufreq_boost_supported(void)
2361 {
2362         return likely(cpufreq_driver) && cpufreq_driver->set_boost;
2363 }
2364
2365 static int create_boost_sysfs_file(void)
2366 {
2367         int ret;
2368
2369         ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2370         if (ret)
2371                 pr_err("%s: cannot register global BOOST sysfs file\n",
2372                        __func__);
2373
2374         return ret;
2375 }
2376
2377 static void remove_boost_sysfs_file(void)
2378 {
2379         if (cpufreq_boost_supported())
2380                 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2381 }
2382
2383 int cpufreq_enable_boost_support(void)
2384 {
2385         if (!cpufreq_driver)
2386                 return -EINVAL;
2387
2388         if (cpufreq_boost_supported())
2389                 return 0;
2390
2391         cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2392
2393         /* This will get removed on driver unregister */
2394         return create_boost_sysfs_file();
2395 }
2396 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
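
/*
 * Illustrative sketch: a driver whose frequency table carries
 * boost-marked entries enabling the software boost fallback from its
 * ->init() callback. The function name is hypothetical.
 */
static int example_init_with_boost(struct cpufreq_policy *policy)
{
        int ret = cpufreq_enable_boost_support();

        if (ret)
                return ret;

        /* ... remaining per-policy setup ... */
        return 0;
}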
2397
2398 int cpufreq_boost_enabled(void)
2399 {
2400         return cpufreq_driver->boost_enabled;
2401 }
2402 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2403
2404 /*********************************************************************
2405  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2406  *********************************************************************/
2407
2408 /**
2409  * cpufreq_register_driver - register a CPU Frequency driver
2410  * @driver_data: A struct cpufreq_driver containing the values
2411  * submitted by the CPU Frequency driver.
2412  *
2413  * Registers a CPU Frequency driver to this core code. This code
2414  * returns zero on success, -EEXIST when another driver got here first
2415  * (and isn't unregistered in the meantime).
2416  *
2417  */
2418 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2419 {
2420         unsigned long flags;
2421         int ret;
2422
2423         if (cpufreq_disabled())
2424                 return -ENODEV;
2425
2426         if (!driver_data || !driver_data->verify || !driver_data->init ||
2427             !(driver_data->setpolicy || driver_data->target_index ||
2428                     driver_data->target) ||
2429              (driver_data->setpolicy && (driver_data->target_index ||
2430                     driver_data->target)) ||
2431              (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2432                 return -EINVAL;
2433
2434         pr_debug("trying to register driver %s\n", driver_data->name);
2435
2436         /* Protect against concurrent CPU online/offline. */
2437         get_online_cpus();
2438
2439         write_lock_irqsave(&cpufreq_driver_lock, flags);
2440         if (cpufreq_driver) {
2441                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2442                 ret = -EEXIST;
2443                 goto out;
2444         }
2445         cpufreq_driver = driver_data;
2446         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2447
2448         if (driver_data->setpolicy)
2449                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2450
2451         if (cpufreq_boost_supported()) {
2452                 ret = create_boost_sysfs_file();
2453                 if (ret)
2454                         goto err_null_driver;
2455         }
2456
2457         ret = subsys_interface_register(&cpufreq_interface);
2458         if (ret)
2459                 goto err_boost_unreg;
2460
2461         if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2462             list_empty(&cpufreq_policy_list)) {
2463                 /* if all ->init() calls failed, unregister */
2464                 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2465                          driver_data->name);
2466                 goto err_if_unreg;
2467         }
2468
2469         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2470         pr_debug("driver %s up and running\n", driver_data->name);
2471
2472 out:
2473         put_online_cpus();
2474         return ret;
2475
2476 err_if_unreg:
2477         subsys_interface_unregister(&cpufreq_interface);
2478 err_boost_unreg:
2479         remove_boost_sysfs_file();
2480 err_null_driver:
2481         write_lock_irqsave(&cpufreq_driver_lock, flags);
2482         cpufreq_driver = NULL;
2483         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2484         goto out;
2485 }
2486 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
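
/*
 * Illustrative sketch: a minimal table-based driver built on the generic
 * helpers. Everything named example_* (and the frequencies) is
 * hypothetical; the cpufreq_generic_* callbacks are real helpers
 * exported by the cpufreq core.
 */
static struct cpufreq_frequency_table example_freq_table[] = {
        { .frequency = 400000 },                /* kHz */
        { .frequency = 800000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_target_index(struct cpufreq_policy *policy,
                                unsigned int index)
{
        /* Program the PLL/regulator for example_freq_table[index] here. */
        return 0;
}

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        /* 100 us transition latency (in ns), made up for the sketch. */
        return cpufreq_generic_init(policy, example_freq_table, 100000);
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name           = "example",
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = example_target_index,
        .init           = example_cpufreq_init,
        .attr           = cpufreq_generic_attr,
};

/* From the module's init: cpufreq_register_driver(&example_cpufreq_driver); */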
2487
2488 /**
2489  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2490  *
2491  * Unregister the current CPUFreq driver. Only call this if you have
2492  * the right to do so, i.e. if you have succeeded in initialising before!
2493  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2494  * currently not initialised.
2495  */
2496 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2497 {
2498         unsigned long flags;
2499
2500         if (!cpufreq_driver || (driver != cpufreq_driver))
2501                 return -EINVAL;
2502
2503         pr_debug("unregistering driver %s\n", driver->name);
2504
2505         /* Protect against concurrent cpu hotplug */
2506         get_online_cpus();
2507         subsys_interface_unregister(&cpufreq_interface);
2508         remove_boost_sysfs_file();
2509         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2510
2511         write_lock_irqsave(&cpufreq_driver_lock, flags);
2512
2513         cpufreq_driver = NULL;
2514
2515         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2516         put_online_cpus();
2517
2518         return 0;
2519 }
2520 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2521
2522 /*
2523  * Stop cpufreq at shutdown to make sure it isn't holding any locks
2524  * or mutexes when secondary CPUs are halted.
2525  */
2526 static struct syscore_ops cpufreq_syscore_ops = {
2527         .shutdown = cpufreq_suspend,
2528 };
2529
2530 struct kobject *cpufreq_global_kobject;
2531 EXPORT_SYMBOL(cpufreq_global_kobject);
2532
2533 static int __init cpufreq_core_init(void)
2534 {
2535         if (cpufreq_disabled())
2536                 return -ENODEV;
2537
2538         cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2539         BUG_ON(!cpufreq_global_kobject);
2540
2541         register_syscore_ops(&cpufreq_syscore_ops);
2542
2543         return 0;
2544 }
2545 core_initcall(cpufreq_core_init);