2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
35 * The "cpufreq driver" - the arch- or hardware-dependent low
36 * level driver of CPUFreq support, and its spinlock. This lock
37 * also protects the cpufreq_cpu_data array.
39 static struct cpufreq_driver *cpufreq_driver;
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
41 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
42 static DEFINE_RWLOCK(cpufreq_driver_lock);
43 DEFINE_MUTEX(cpufreq_governor_lock);
44 static LIST_HEAD(cpufreq_policy_list);
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
49 /* Flag to suspend/resume CPUFreq governors */
50 static bool cpufreq_suspended;
52 static inline bool has_target(void)
54 return cpufreq_driver->target_index || cpufreq_driver->target;
58 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
61 static DECLARE_RWSEM(cpufreq_rwsem);
63 /* internal prototypes */
64 static int __cpufreq_governor(struct cpufreq_policy *policy,
66 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
67 static void handle_update(struct work_struct *work);
70 * Two notifier lists: the "policy" list is involved in the
71 * validation process for a new CPU frequency policy; the
72 * "transition" list for kernel code that needs to handle
73 * changes to devices when the CPU clock speed changes.
74 * The mutex locks both lists.
76 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
77 static struct srcu_notifier_head cpufreq_transition_notifier_list;
79 static bool init_cpufreq_transition_notifier_list_called;
80 static int __init init_cpufreq_transition_notifier_list(void)
82 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
83 init_cpufreq_transition_notifier_list_called = true;
86 pure_initcall(init_cpufreq_transition_notifier_list);
88 static int off __read_mostly;
89 static int cpufreq_disabled(void)
93 void disable_cpufreq(void)
97 static LIST_HEAD(cpufreq_governor_list);
98 static DEFINE_MUTEX(cpufreq_governor_mutex);
100 bool have_governor_per_policy(void)
102 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
104 EXPORT_SYMBOL_GPL(have_governor_per_policy);
106 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
108 if (have_governor_per_policy())
109 return &policy->kobj;
111 return cpufreq_global_kobject;
113 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
115 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
121 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
123 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
128 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
130 idle_time = cur_wall_time - busy_time;
132 *wall = cputime_to_usecs(cur_wall_time);
134 return cputime_to_usecs(idle_time);
137 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
139 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
141 if (idle_time == -1ULL)
142 return get_cpu_idle_time_jiffy(cpu, wall);
144 idle_time += get_cpu_iowait_time_us(cpu, wall);
148 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
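/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a sampling governor typically turns two successive readings of
 * get_cpu_idle_time() into a load estimate. "example_cpu_load" and the
 * prev_* bookkeeping are hypothetical names used only here.
 */
static unsigned int example_cpu_load(unsigned int cpu,
				     u64 *prev_wall, u64 *prev_idle)
{
	u64 wall, idle;
	unsigned int wall_delta, idle_delta;

	idle = get_cpu_idle_time(cpu, &wall, 0);

	/* both values are reported in microseconds */
	wall_delta = (unsigned int)(wall - *prev_wall);
	idle_delta = (unsigned int)(idle - *prev_idle);
	*prev_wall = wall;
	*prev_idle = idle;

	if (!wall_delta || idle_delta > wall_delta)
		return 0;

	return 100 * (wall_delta - idle_delta) / wall_delta;
}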
151 * This is a generic cpufreq init() routine which can be used by cpufreq
152 * drivers of SMP systems. It will do the following:
153 * - validate & show the frequency table passed
154 * - set the policy's transition latency
155 * - fill policy->cpus with all possible CPUs
157 int cpufreq_generic_init(struct cpufreq_policy *policy,
158 struct cpufreq_frequency_table *table,
159 unsigned int transition_latency)
163 ret = cpufreq_table_validate_and_show(policy, table);
165 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
169 policy->cpuinfo.transition_latency = transition_latency;
172 * The driver only supports the SMP configuration where all processors
173 * share the clock and voltage.
175 cpumask_setall(policy->cpus);
179 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
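/*
 * Illustrative sketch (editor's example, not part of the original file):
 * with cpufreq_generic_init() a platform driver's ->init() callback often
 * reduces to a single call. "my_freq_table" and "my_cpufreq_init" are
 * hypothetical driver-side names, and the frequencies are made up.
 */
static struct cpufreq_frequency_table my_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
	/* validate/show the table, 100 us latency, all CPUs share one clock */
	return cpufreq_generic_init(policy, my_freq_table, 100000);
}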
181 unsigned int cpufreq_generic_get(unsigned int cpu)
183 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
185 if (!policy || IS_ERR(policy->clk)) {
186 pr_err("%s: No %s associated to cpu: %d\n",
187 __func__, policy ? "clk" : "policy", cpu);
191 return clk_get_rate(policy->clk) / 1000;
193 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
195 /* Only for cpufreq core internal use */
196 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
198 return per_cpu(cpufreq_cpu_data, cpu);
201 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
203 struct cpufreq_policy *policy = NULL;
206 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
209 if (!down_read_trylock(&cpufreq_rwsem))
212 /* get the cpufreq driver */
213 read_lock_irqsave(&cpufreq_driver_lock, flags);
215 if (cpufreq_driver) {
217 policy = per_cpu(cpufreq_cpu_data, cpu);
219 kobject_get(&policy->kobj);
222 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
225 up_read(&cpufreq_rwsem);
229 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
231 void cpufreq_cpu_put(struct cpufreq_policy *policy)
233 if (cpufreq_disabled())
236 kobject_put(&policy->kobj);
237 up_read(&cpufreq_rwsem);
239 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
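/*
 * Illustrative sketch (editor's example, not part of the original file):
 * cpufreq_cpu_get()/cpufreq_cpu_put() must always be balanced; a typical
 * short-lived reader looks like this. "example_read_cur" is hypothetical.
 */
static unsigned int example_read_cur(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur = 0;

	if (policy) {
		cur = policy->cur;
		cpufreq_cpu_put(policy);	/* drop the kobject reference */
	}

	return cur;
}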
241 /*********************************************************************
242 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
243 *********************************************************************/
246 * adjust_jiffies - adjust the system "loops_per_jiffy"
248 * This function alters the system "loops_per_jiffy" for the clock
249 * speed change. Note that loops_per_jiffy cannot be updated on SMP
250 * systems as each CPU might be scaled differently. So, use the arch
251 * per-CPU loops_per_jiffy value wherever possible.
254 static unsigned long l_p_j_ref;
255 static unsigned int l_p_j_ref_freq;
257 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
259 if (ci->flags & CPUFREQ_CONST_LOOPS)
262 if (!l_p_j_ref_freq) {
263 l_p_j_ref = loops_per_jiffy;
264 l_p_j_ref_freq = ci->old;
265 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
266 l_p_j_ref, l_p_j_ref_freq);
268 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
269 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
271 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
272 loops_per_jiffy, ci->new);
276 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
282 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
283 struct cpufreq_freqs *freqs, unsigned int state)
285 BUG_ON(irqs_disabled());
287 if (cpufreq_disabled())
290 freqs->flags = cpufreq_driver->flags;
291 pr_debug("notification %u of frequency transition to %u kHz\n",
296 case CPUFREQ_PRECHANGE:
297 /* detect if the driver reported a value as "old frequency"
298 * which is not equal to what the cpufreq core thinks is
301 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
302 if ((policy) && (policy->cpu == freqs->cpu) &&
303 (policy->cur) && (policy->cur != freqs->old)) {
304 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
305 freqs->old, policy->cur);
306 freqs->old = policy->cur;
309 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
310 CPUFREQ_PRECHANGE, freqs);
311 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
314 case CPUFREQ_POSTCHANGE:
315 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
316 pr_debug("FREQ: %lu - CPU: %lu\n",
317 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
318 trace_cpu_frequency(freqs->new, freqs->cpu);
319 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
320 CPUFREQ_POSTCHANGE, freqs);
321 if (likely(policy) && likely(policy->cpu == freqs->cpu))
322 policy->cur = freqs->new;
328 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
329 * on frequency transition.
331 * This function calls the transition notifiers and the "adjust_jiffies"
332 * function. It is called twice on all CPU frequency changes that have
335 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
336 struct cpufreq_freqs *freqs, unsigned int state)
338 for_each_cpu(freqs->cpu, policy->cpus)
339 __cpufreq_notify_transition(policy, freqs, state);
342 /* Do post notifications when there is a chance that the transition has failed */
343 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
344 struct cpufreq_freqs *freqs, int transition_failed)
346 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
347 if (!transition_failed)
350 swap(freqs->old, freqs->new);
351 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
352 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
355 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
356 struct cpufreq_freqs *freqs)
360 * Catch double invocations of _begin() which lead to self-deadlock.
361 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
362 * doesn't invoke _begin() on their behalf, and hence the chances of
363 * double invocations are very low. Moreover, there are scenarios
364 * where these checks can emit false-positive warnings in these
365 * drivers; so we avoid that by skipping them altogether.
367 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
368 && current == policy->transition_task);
371 wait_event(policy->transition_wait, !policy->transition_ongoing);
373 spin_lock(&policy->transition_lock);
375 if (unlikely(policy->transition_ongoing)) {
376 spin_unlock(&policy->transition_lock);
380 policy->transition_ongoing = true;
381 policy->transition_task = current;
383 spin_unlock(&policy->transition_lock);
385 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
387 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
389 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
390 struct cpufreq_freqs *freqs, int transition_failed)
392 if (unlikely(WARN_ON(!policy->transition_ongoing)))
395 cpufreq_notify_post_transition(policy, freqs, transition_failed);
397 policy->transition_ongoing = false;
398 policy->transition_task = NULL;
400 wake_up(&policy->transition_wait);
402 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
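/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a driver that performs its own notification (e.g. one implementing
 * ->target()) brackets the hardware switch with the begin/end helpers
 * above. "my_set_target" and "my_write_freq_hw" are hypothetical.
 */
static int my_write_freq_hw(unsigned int freq_khz);	/* assumed platform helper */

static int my_set_target(struct cpufreq_policy *policy,
			 unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = target_freq,
	};
	int ret;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = my_write_freq_hw(target_freq);
	cpufreq_freq_transition_end(policy, &freqs, ret);

	return ret;
}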
405 /*********************************************************************
407 *********************************************************************/
408 static ssize_t show_boost(struct kobject *kobj,
409 struct attribute *attr, char *buf)
411 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
414 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
415 const char *buf, size_t count)
419 ret = sscanf(buf, "%d", &enable);
420 if (ret != 1 || enable < 0 || enable > 1)
423 if (cpufreq_boost_trigger_state(enable)) {
424 pr_err("%s: Cannot %s BOOST!\n",
425 __func__, enable ? "enable" : "disable");
429 pr_debug("%s: cpufreq BOOST %s\n",
430 __func__, enable ? "enabled" : "disabled");
434 define_one_global_rw(boost);
436 static struct cpufreq_governor *find_governor(const char *str_governor)
438 struct cpufreq_governor *t;
440 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
441 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
448 * cpufreq_parse_governor - parse a governor string
450 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
451 struct cpufreq_governor **governor)
458 if (cpufreq_driver->setpolicy) {
459 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
460 *policy = CPUFREQ_POLICY_PERFORMANCE;
462 } else if (!strncasecmp(str_governor, "powersave",
464 *policy = CPUFREQ_POLICY_POWERSAVE;
468 struct cpufreq_governor *t;
470 mutex_lock(&cpufreq_governor_mutex);
472 t = find_governor(str_governor);
477 mutex_unlock(&cpufreq_governor_mutex);
478 ret = request_module("cpufreq_%s", str_governor);
479 mutex_lock(&cpufreq_governor_mutex);
482 t = find_governor(str_governor);
490 mutex_unlock(&cpufreq_governor_mutex);
497 * cpufreq_per_cpu_attr_read() / show_##file_name() -
498 * print out cpufreq information
500 * Write out information from cpufreq_driver->policy[cpu]; object must be
504 #define show_one(file_name, object) \
505 static ssize_t show_##file_name \
506 (struct cpufreq_policy *policy, char *buf) \
508 return sprintf(buf, "%u\n", policy->object); \
511 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
512 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
513 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
514 show_one(scaling_min_freq, min);
515 show_one(scaling_max_freq, max);
517 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
521 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
522 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
524 ret = sprintf(buf, "%u\n", policy->cur);
528 static int cpufreq_set_policy(struct cpufreq_policy *policy,
529 struct cpufreq_policy *new_policy);
532 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
534 #define store_one(file_name, object) \
535 static ssize_t store_##file_name \
536 (struct cpufreq_policy *policy, const char *buf, size_t count) \
539 struct cpufreq_policy new_policy; \
541 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
545 ret = sscanf(buf, "%u", &new_policy.object); \
549 temp = new_policy.object; \
550 ret = cpufreq_set_policy(policy, &new_policy); \
552 policy->user_policy.object = temp; \
554 return ret ? ret : count; \
557 store_one(scaling_min_freq, min);
558 store_one(scaling_max_freq, max);
561 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
563 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
566 unsigned int cur_freq = __cpufreq_get(policy);
568 return sprintf(buf, "<unknown>");
569 return sprintf(buf, "%u\n", cur_freq);
573 * show_scaling_governor - show the current policy for the specified CPU
575 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
577 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
578 return sprintf(buf, "powersave\n");
579 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
580 return sprintf(buf, "performance\n");
581 else if (policy->governor)
582 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
583 policy->governor->name);
588 * store_scaling_governor - store policy for the specified CPU
590 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
591 const char *buf, size_t count)
594 char str_governor[16];
595 struct cpufreq_policy new_policy;
597 ret = cpufreq_get_policy(&new_policy, policy->cpu);
601 ret = sscanf(buf, "%15s", str_governor);
605 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
606 &new_policy.governor))
609 ret = cpufreq_set_policy(policy, &new_policy);
611 policy->user_policy.policy = policy->policy;
612 policy->user_policy.governor = policy->governor;
621 * show_scaling_driver - show the cpufreq driver currently loaded
623 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
625 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
629 * show_scaling_available_governors - show the available CPUfreq governors
631 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
635 struct cpufreq_governor *t;
638 i += sprintf(buf, "performance powersave");
642 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
643 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
644 - (CPUFREQ_NAME_LEN + 2)))
646 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
649 i += sprintf(&buf[i], "\n");
653 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
658 for_each_cpu(cpu, mask) {
660 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
661 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
662 if (i >= (PAGE_SIZE - 5))
665 i += sprintf(&buf[i], "\n");
668 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
671 * show_related_cpus - show the CPUs affected by each transition even if
672 * hw coordination is in use
674 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
676 return cpufreq_show_cpus(policy->related_cpus, buf);
680 * show_affected_cpus - show the CPUs affected by each transition
682 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
684 return cpufreq_show_cpus(policy->cpus, buf);
687 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
688 const char *buf, size_t count)
690 unsigned int freq = 0;
693 if (!policy->governor || !policy->governor->store_setspeed)
696 ret = sscanf(buf, "%u", &freq);
700 policy->governor->store_setspeed(policy, freq);
705 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
707 if (!policy->governor || !policy->governor->show_setspeed)
708 return sprintf(buf, "<unsupported>\n");
710 return policy->governor->show_setspeed(policy, buf);
714 * show_bios_limit - show the current cpufreq HW/BIOS limitation
716 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
720 if (cpufreq_driver->bios_limit) {
721 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
723 return sprintf(buf, "%u\n", limit);
725 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
728 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
729 cpufreq_freq_attr_ro(cpuinfo_min_freq);
730 cpufreq_freq_attr_ro(cpuinfo_max_freq);
731 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
732 cpufreq_freq_attr_ro(scaling_available_governors);
733 cpufreq_freq_attr_ro(scaling_driver);
734 cpufreq_freq_attr_ro(scaling_cur_freq);
735 cpufreq_freq_attr_ro(bios_limit);
736 cpufreq_freq_attr_ro(related_cpus);
737 cpufreq_freq_attr_ro(affected_cpus);
738 cpufreq_freq_attr_rw(scaling_min_freq);
739 cpufreq_freq_attr_rw(scaling_max_freq);
740 cpufreq_freq_attr_rw(scaling_governor);
741 cpufreq_freq_attr_rw(scaling_setspeed);
743 static struct attribute *default_attrs[] = {
744 &cpuinfo_min_freq.attr,
745 &cpuinfo_max_freq.attr,
746 &cpuinfo_transition_latency.attr,
747 &scaling_min_freq.attr,
748 &scaling_max_freq.attr,
751 &scaling_governor.attr,
752 &scaling_driver.attr,
753 &scaling_available_governors.attr,
754 &scaling_setspeed.attr,
758 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
759 #define to_attr(a) container_of(a, struct freq_attr, attr)
761 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
763 struct cpufreq_policy *policy = to_policy(kobj);
764 struct freq_attr *fattr = to_attr(attr);
767 if (!down_read_trylock(&cpufreq_rwsem))
770 down_read(&policy->rwsem);
773 ret = fattr->show(policy, buf);
777 up_read(&policy->rwsem);
778 up_read(&cpufreq_rwsem);
783 static ssize_t store(struct kobject *kobj, struct attribute *attr,
784 const char *buf, size_t count)
786 struct cpufreq_policy *policy = to_policy(kobj);
787 struct freq_attr *fattr = to_attr(attr);
788 ssize_t ret = -EINVAL;
792 if (!cpu_online(policy->cpu))
795 if (!down_read_trylock(&cpufreq_rwsem))
798 down_write(&policy->rwsem);
801 ret = fattr->store(policy, buf, count);
805 up_write(&policy->rwsem);
807 up_read(&cpufreq_rwsem);
814 static void cpufreq_sysfs_release(struct kobject *kobj)
816 struct cpufreq_policy *policy = to_policy(kobj);
817 pr_debug("last reference is dropped\n");
818 complete(&policy->kobj_unregister);
821 static const struct sysfs_ops sysfs_ops = {
826 static struct kobj_type ktype_cpufreq = {
827 .sysfs_ops = &sysfs_ops,
828 .default_attrs = default_attrs,
829 .release = cpufreq_sysfs_release,
832 struct kobject *cpufreq_global_kobject;
833 EXPORT_SYMBOL(cpufreq_global_kobject);
835 static int cpufreq_global_kobject_usage;
837 int cpufreq_get_global_kobject(void)
839 if (!cpufreq_global_kobject_usage++)
840 return kobject_add(cpufreq_global_kobject,
841 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
845 EXPORT_SYMBOL(cpufreq_get_global_kobject);
847 void cpufreq_put_global_kobject(void)
849 if (!--cpufreq_global_kobject_usage)
850 kobject_del(cpufreq_global_kobject);
852 EXPORT_SYMBOL(cpufreq_put_global_kobject);
854 int cpufreq_sysfs_create_file(const struct attribute *attr)
856 int ret = cpufreq_get_global_kobject();
859 ret = sysfs_create_file(cpufreq_global_kobject, attr);
861 cpufreq_put_global_kobject();
866 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
868 void cpufreq_sysfs_remove_file(const struct attribute *attr)
870 sysfs_remove_file(cpufreq_global_kobject, attr);
871 cpufreq_put_global_kobject();
873 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
875 /* symlink affected CPUs */
876 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
881 for_each_cpu(j, policy->cpus) {
882 struct device *cpu_dev;
884 if (j == policy->cpu)
887 pr_debug("Adding link for CPU: %u\n", j);
888 cpu_dev = get_cpu_device(j);
889 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
897 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
900 struct freq_attr **drv_attr;
903 /* set up files for this cpu device */
904 drv_attr = cpufreq_driver->attr;
905 while (drv_attr && *drv_attr) {
906 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
911 if (cpufreq_driver->get) {
912 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
917 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
921 if (cpufreq_driver->bios_limit) {
922 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
927 return cpufreq_add_dev_symlink(policy);
930 static void cpufreq_init_policy(struct cpufreq_policy *policy)
932 struct cpufreq_governor *gov = NULL;
933 struct cpufreq_policy new_policy;
936 memcpy(&new_policy, policy, sizeof(*policy));
938 /* Update governor of new_policy to the governor used before hotplug */
939 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
941 pr_debug("Restoring governor %s for cpu %d\n",
942 policy->governor->name, policy->cpu);
944 gov = CPUFREQ_DEFAULT_GOVERNOR;
946 new_policy.governor = gov;
948 /* Use the default policy if it is valid. */
949 if (cpufreq_driver->setpolicy)
950 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
952 /* set default policy */
953 ret = cpufreq_set_policy(policy, &new_policy);
955 pr_debug("setting policy failed\n");
956 if (cpufreq_driver->exit)
957 cpufreq_driver->exit(policy);
961 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
962 unsigned int cpu, struct device *dev)
968 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
970 pr_err("%s: Failed to stop governor\n", __func__);
975 down_write(&policy->rwsem);
977 write_lock_irqsave(&cpufreq_driver_lock, flags);
979 cpumask_set_cpu(cpu, policy->cpus);
980 per_cpu(cpufreq_cpu_data, cpu) = policy;
981 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
983 up_write(&policy->rwsem);
986 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
988 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
991 pr_err("%s: Failed to start governor\n", __func__);
996 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
999 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1001 struct cpufreq_policy *policy;
1002 unsigned long flags;
1004 read_lock_irqsave(&cpufreq_driver_lock, flags);
1006 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
1008 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1011 policy->governor = NULL;
1016 static struct cpufreq_policy *cpufreq_policy_alloc(void)
1018 struct cpufreq_policy *policy;
1020 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1024 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1025 goto err_free_policy;
1027 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1028 goto err_free_cpumask;
1030 INIT_LIST_HEAD(&policy->policy_list);
1031 init_rwsem(&policy->rwsem);
1032 spin_lock_init(&policy->transition_lock);
1033 init_waitqueue_head(&policy->transition_wait);
1038 free_cpumask_var(policy->cpus);
1045 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1047 struct kobject *kobj;
1048 struct completion *cmp;
1050 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1051 CPUFREQ_REMOVE_POLICY, policy);
1053 down_read(&policy->rwsem);
1054 kobj = &policy->kobj;
1055 cmp = &policy->kobj_unregister;
1056 up_read(&policy->rwsem);
1060 * We need to make sure that the underlying kobj is
1061 * actually not referenced anymore by anybody before we
1062 * proceed with unloading.
1064 pr_debug("waiting for dropping of refcount\n");
1065 wait_for_completion(cmp);
1066 pr_debug("wait complete\n");
1069 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1071 free_cpumask_var(policy->related_cpus);
1072 free_cpumask_var(policy->cpus);
1076 static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1077 struct device *cpu_dev)
1081 if (WARN_ON(cpu == policy->cpu))
1084 /* Move kobject to the new policy->cpu */
1085 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1087 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1091 down_write(&policy->rwsem);
1093 policy->last_cpu = policy->cpu;
1096 up_write(&policy->rwsem);
1098 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1099 CPUFREQ_UPDATE_POLICY_CPU, policy);
1104 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1106 unsigned int j, cpu = dev->id;
1108 struct cpufreq_policy *policy, *tpolicy;
1109 unsigned long flags;
1110 bool recover_policy = cpufreq_suspended;
1112 if (cpu_is_offline(cpu))
1115 pr_debug("adding CPU %u\n", cpu);
1117 /* check whether a different CPU already registered this
1118 * CPU because it is in the same boat. */
1119 policy = cpufreq_cpu_get(cpu);
1120 if (unlikely(policy)) {
1121 cpufreq_cpu_put(policy);
1125 if (!down_read_trylock(&cpufreq_rwsem))
1128 /* Check if this cpu was hot-unplugged earlier and has siblings */
1129 read_lock_irqsave(&cpufreq_driver_lock, flags);
1130 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1131 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1132 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1133 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1134 up_read(&cpufreq_rwsem);
1138 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1141 * Restore the saved policy when doing light-weight init and fall back
1142 * to the full init if that fails.
1144 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1146 recover_policy = false;
1147 policy = cpufreq_policy_alloc();
1153 * In the resume path, since we restore a saved policy, the assignment
1154 * to policy->cpu is like an update of the existing policy, rather than
1155 * the creation of a brand new one. So we need to perform this update
1156 * by invoking update_policy_cpu().
1158 if (recover_policy && cpu != policy->cpu)
1159 WARN_ON(update_policy_cpu(policy, cpu, dev));
1163 cpumask_copy(policy->cpus, cpumask_of(cpu));
1165 init_completion(&policy->kobj_unregister);
1166 INIT_WORK(&policy->update, handle_update);
1168 /* call driver. From then on the cpufreq driver must be able
1169 * to accept all calls to ->verify and ->setpolicy for this CPU
1171 ret = cpufreq_driver->init(policy);
1173 pr_debug("initialization failed\n");
1174 goto err_set_policy_cpu;
1177 down_write(&policy->rwsem);
1179 /* related cpus should at least have policy->cpus */
1180 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1183 * affected cpus must always be the ones that are online. We aren't
1184 * managing offline cpus here.
1186 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1188 if (!recover_policy) {
1189 policy->user_policy.min = policy->min;
1190 policy->user_policy.max = policy->max;
1192 /* prepare interface data */
1193 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1194 &dev->kobj, "cpufreq");
1196 pr_err("%s: failed to init policy->kobj: %d\n",
1198 goto err_init_policy_kobj;
1202 write_lock_irqsave(&cpufreq_driver_lock, flags);
1203 for_each_cpu(j, policy->cpus)
1204 per_cpu(cpufreq_cpu_data, j) = policy;
1205 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1207 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1208 policy->cur = cpufreq_driver->get(policy->cpu);
1210 pr_err("%s: ->get() failed\n", __func__);
1216 * Sometimes boot loaders set CPU frequency to a value outside of
1217 * frequency table present with cpufreq core. In such cases CPU might be
1218 * unstable if it has to run on that frequency for a long duration of time
1219 * and so it's better to set it to a frequency which is specified in
1220 * freq-table. This also makes cpufreq stats inconsistent as
1221 * cpufreq-stats would fail to register because current frequency of CPU
1222 * isn't found in freq-table.
1224 * Because we don't want this change to affect the boot process badly, we go
1225 * for the next freq which is >= policy->cur ('cur' must be set by now,
1226 * otherwise we will end up setting freq to lowest of the table as 'cur'
1227 * is initialized to zero).
1229 * We are passing target-freq as "policy->cur - 1" otherwise
1230 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1231 * equal to target-freq.
1233 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1235 /* Are we running at an unknown frequency? */
1236 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1237 if (ret == -EINVAL) {
1238 /* Warn user and fix it */
1239 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1240 __func__, policy->cpu, policy->cur);
1241 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1242 CPUFREQ_RELATION_L);
1245 * Reaching here within a few seconds after boot does not
1246 * mean that the system will remain stable at the "unknown"
1247 * frequency for a longer duration. Hence, a BUG_ON().
1250 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1251 __func__, policy->cpu, policy->cur);
1255 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1256 CPUFREQ_START, policy);
1258 if (!recover_policy) {
1259 ret = cpufreq_add_dev_interface(policy, dev);
1261 goto err_out_unregister;
1262 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1263 CPUFREQ_CREATE_POLICY, policy);
1266 write_lock_irqsave(&cpufreq_driver_lock, flags);
1267 list_add(&policy->policy_list, &cpufreq_policy_list);
1268 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1270 cpufreq_init_policy(policy);
1272 if (!recover_policy) {
1273 policy->user_policy.policy = policy->policy;
1274 policy->user_policy.governor = policy->governor;
1276 up_write(&policy->rwsem);
1278 kobject_uevent(&policy->kobj, KOBJ_ADD);
1280 up_read(&cpufreq_rwsem);
1282 /* Callback for handling stuff after policy is ready */
1283 if (cpufreq_driver->ready)
1284 cpufreq_driver->ready(policy);
1286 pr_debug("initialization complete\n");
1292 write_lock_irqsave(&cpufreq_driver_lock, flags);
1293 for_each_cpu(j, policy->cpus)
1294 per_cpu(cpufreq_cpu_data, j) = NULL;
1295 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1297 if (!recover_policy) {
1298 kobject_put(&policy->kobj);
1299 wait_for_completion(&policy->kobj_unregister);
1301 err_init_policy_kobj:
1302 up_write(&policy->rwsem);
1304 if (cpufreq_driver->exit)
1305 cpufreq_driver->exit(policy);
1307 if (recover_policy) {
1308 /* Do not leave stale fallback data behind. */
1309 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1310 cpufreq_policy_put_kobj(policy);
1312 cpufreq_policy_free(policy);
1315 up_read(&cpufreq_rwsem);
1321 * cpufreq_add_dev - add a CPU device
1323 * Adds the cpufreq interface for a CPU device.
1325 * The Oracle says: try running cpufreq registration/unregistration concurrently
1326 * with cpu hotplugging and all hell will break loose. Tried to clean this
1327 * mess up, but more thorough testing is needed. - Mathieu
1329 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1331 return __cpufreq_add_dev(dev, sif);
1334 static int __cpufreq_remove_dev_prepare(struct device *dev,
1335 struct subsys_interface *sif)
1337 unsigned int cpu = dev->id, cpus;
1339 unsigned long flags;
1340 struct cpufreq_policy *policy;
1342 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1344 write_lock_irqsave(&cpufreq_driver_lock, flags);
1346 policy = per_cpu(cpufreq_cpu_data, cpu);
1348 /* Save the policy somewhere when doing a light-weight tear-down */
1349 if (cpufreq_suspended)
1350 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1352 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1355 pr_debug("%s: No cpu_data found\n", __func__);
1360 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1362 pr_err("%s: Failed to stop governor\n", __func__);
1366 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1367 policy->governor->name, CPUFREQ_NAME_LEN);
1370 down_read(&policy->rwsem);
1371 cpus = cpumask_weight(policy->cpus);
1372 up_read(&policy->rwsem);
1374 if (cpu != policy->cpu) {
1375 sysfs_remove_link(&dev->kobj, "cpufreq");
1376 } else if (cpus > 1) {
1377 /* Nominate new CPU */
1378 int new_cpu = cpumask_any_but(policy->cpus, cpu);
1379 struct device *cpu_dev = get_cpu_device(new_cpu);
1381 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1382 ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1384 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1386 pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1387 __func__, cpu_dev->id);
1391 if (!cpufreq_suspended)
1392 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1393 __func__, new_cpu, cpu);
1394 } else if (cpufreq_driver->stop_cpu) {
1395 cpufreq_driver->stop_cpu(policy);
1401 static int __cpufreq_remove_dev_finish(struct device *dev,
1402 struct subsys_interface *sif)
1404 unsigned int cpu = dev->id, cpus;
1406 unsigned long flags;
1407 struct cpufreq_policy *policy;
1409 read_lock_irqsave(&cpufreq_driver_lock, flags);
1410 policy = per_cpu(cpufreq_cpu_data, cpu);
1411 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1414 pr_debug("%s: No cpu_data found\n", __func__);
1418 down_write(&policy->rwsem);
1419 cpus = cpumask_weight(policy->cpus);
1422 cpumask_clear_cpu(cpu, policy->cpus);
1423 up_write(&policy->rwsem);
1425 /* If cpu is last user of policy, free policy */
1428 ret = __cpufreq_governor(policy,
1429 CPUFREQ_GOV_POLICY_EXIT);
1431 pr_err("%s: Failed to exit governor\n",
1437 if (!cpufreq_suspended)
1438 cpufreq_policy_put_kobj(policy);
1441 * Perform the ->exit() even during light-weight tear-down,
1442 * since this is a core component, and is essential for the
1443 * subsequent light-weight ->init() to succeed.
1445 if (cpufreq_driver->exit)
1446 cpufreq_driver->exit(policy);
1448 /* Remove policy from list of active policies */
1449 write_lock_irqsave(&cpufreq_driver_lock, flags);
1450 list_del(&policy->policy_list);
1451 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1453 if (!cpufreq_suspended)
1454 cpufreq_policy_free(policy);
1455 } else if (has_target()) {
1456 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1458 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1461 pr_err("%s: Failed to start governor\n", __func__);
1466 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1471 * cpufreq_remove_dev - remove a CPU device
1473 * Removes the cpufreq interface for a CPU device.
1475 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1477 unsigned int cpu = dev->id;
1480 if (cpu_is_offline(cpu))
1483 ret = __cpufreq_remove_dev_prepare(dev, sif);
1486 ret = __cpufreq_remove_dev_finish(dev, sif);
1491 static void handle_update(struct work_struct *work)
1493 struct cpufreq_policy *policy =
1494 container_of(work, struct cpufreq_policy, update);
1495 unsigned int cpu = policy->cpu;
1496 pr_debug("handle_update for cpu %u called\n", cpu);
1497 cpufreq_update_policy(cpu);
1501 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1503 * @policy: policy managing CPUs
1504 * @new_freq: CPU frequency the CPU actually runs at
1506 * We adjust to the current frequency first, and need to clean up later.
1507 * So either call cpufreq_update_policy() or schedule handle_update().
1509 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1510 unsigned int new_freq)
1512 struct cpufreq_freqs freqs;
1514 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1515 policy->cur, new_freq);
1517 freqs.old = policy->cur;
1518 freqs.new = new_freq;
1520 cpufreq_freq_transition_begin(policy, &freqs);
1521 cpufreq_freq_transition_end(policy, &freqs, 0);
1525 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1528 * This is the last known freq, without actually getting it from the driver.
1529 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1531 unsigned int cpufreq_quick_get(unsigned int cpu)
1533 struct cpufreq_policy *policy;
1534 unsigned int ret_freq = 0;
1536 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1537 return cpufreq_driver->get(cpu);
1539 policy = cpufreq_cpu_get(cpu);
1541 ret_freq = policy->cur;
1542 cpufreq_cpu_put(policy);
1547 EXPORT_SYMBOL(cpufreq_quick_get);
1550 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1553 * Just return the max possible frequency for a given CPU.
1555 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1557 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1558 unsigned int ret_freq = 0;
1561 ret_freq = policy->max;
1562 cpufreq_cpu_put(policy);
1567 EXPORT_SYMBOL(cpufreq_quick_get_max);
1569 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1571 unsigned int ret_freq = 0;
1573 if (!cpufreq_driver->get)
1576 ret_freq = cpufreq_driver->get(policy->cpu);
1578 if (ret_freq && policy->cur &&
1579 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1580 /* verify no discrepancy between actual and
1581 saved value exists */
1582 if (unlikely(ret_freq != policy->cur)) {
1583 cpufreq_out_of_sync(policy, ret_freq);
1584 schedule_work(&policy->update);
1592 * cpufreq_get - get the current CPU frequency (in kHz)
1595 * Get the current frequency of the given CPU, as reported by the driver
1597 unsigned int cpufreq_get(unsigned int cpu)
1599 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1600 unsigned int ret_freq = 0;
1603 down_read(&policy->rwsem);
1604 ret_freq = __cpufreq_get(policy);
1605 up_read(&policy->rwsem);
1607 cpufreq_cpu_put(policy);
1612 EXPORT_SYMBOL(cpufreq_get);
1614 static struct subsys_interface cpufreq_interface = {
1616 .subsys = &cpu_subsys,
1617 .add_dev = cpufreq_add_dev,
1618 .remove_dev = cpufreq_remove_dev,
1622 * In case platform wants some specific frequency to be configured
1625 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1629 if (!policy->suspend_freq) {
1630 pr_err("%s: suspend_freq can't be zero\n", __func__);
1634 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1635 policy->suspend_freq);
1637 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1638 CPUFREQ_RELATION_H);
1640 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1641 __func__, policy->suspend_freq, ret);
1645 EXPORT_SYMBOL(cpufreq_generic_suspend);
1648 * cpufreq_suspend() - Suspend CPUFreq governors
1650 * Called during system-wide Suspend/Hibernate cycles to suspend governors,
1651 * as some platforms can't change frequency after this point in the suspend cycle
1652 * because some of the devices (like i2c, regulators, etc.) used for
1653 * changing the frequency are suspended quickly after this point.
1655 void cpufreq_suspend(void)
1657 struct cpufreq_policy *policy;
1659 if (!cpufreq_driver)
1665 pr_debug("%s: Suspending Governors\n", __func__);
1667 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1668 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1669 pr_err("%s: Failed to stop governor for policy: %p\n",
1671 else if (cpufreq_driver->suspend
1672 && cpufreq_driver->suspend(policy))
1673 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1678 cpufreq_suspended = true;
1682 * cpufreq_resume() - Resume CPUFreq governors
1684 * Called during system-wide Suspend/Hibernate cycles to resume governors that
1685 * are suspended with cpufreq_suspend().
1687 void cpufreq_resume(void)
1689 struct cpufreq_policy *policy;
1691 if (!cpufreq_driver)
1694 cpufreq_suspended = false;
1699 pr_debug("%s: Resuming Governors\n", __func__);
1701 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1702 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1703 pr_err("%s: Failed to resume driver: %p\n", __func__,
1705 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1706 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1707 pr_err("%s: Failed to start governor for policy: %p\n",
1711 * schedule a call to cpufreq_update_policy() for the boot CPU, i.e. the last
1712 * policy in the list. It will verify that the current freq is in
1713 * sync with what we believe it to be.
1715 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1716 schedule_work(&policy->update);
1721 * cpufreq_get_current_driver - return current driver's name
1723 * Return the name string of the currently loaded cpufreq driver
1726 const char *cpufreq_get_current_driver(void)
1729 return cpufreq_driver->name;
1733 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1736 * cpufreq_get_driver_data - return current driver data
1738 * Return the private data of the currently loaded cpufreq
1739 * driver, or NULL if no cpufreq driver is loaded.
1741 void *cpufreq_get_driver_data(void)
1744 return cpufreq_driver->driver_data;
1748 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1750 /*********************************************************************
1751 * NOTIFIER LISTS INTERFACE *
1752 *********************************************************************/
1755 * cpufreq_register_notifier - register a driver with cpufreq
1756 * @nb: notifier function to register
1757 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1759 * Add a driver to one of two lists: either a list of drivers that
1760 * are notified about clock rate changes (once before and once after
1761 * the transition), or a list of drivers that are notified about
1762 * changes in cpufreq policy.
1764 * This function may sleep, and has the same return conditions as
1765 * blocking_notifier_chain_register.
1767 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1771 if (cpufreq_disabled())
1774 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1777 case CPUFREQ_TRANSITION_NOTIFIER:
1778 ret = srcu_notifier_chain_register(
1779 &cpufreq_transition_notifier_list, nb);
1781 case CPUFREQ_POLICY_NOTIFIER:
1782 ret = blocking_notifier_chain_register(
1783 &cpufreq_policy_notifier_list, nb);
1791 EXPORT_SYMBOL(cpufreq_register_notifier);
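/*
 * Illustrative sketch (editor's example, not part of the original file):
 * registering for transition notifications. The callback name and its
 * body are hypothetical.
 */
static int my_transition_cb(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u now running at %u kHz\n",
			 freqs->cpu, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block my_transition_nb = {
	.notifier_call = my_transition_cb,
};

/*
 * e.g. from module init:
 *	cpufreq_register_notifier(&my_transition_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */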
1794 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1795 * @nb: notifier block to be unregistered
1796 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1798 * Remove a driver from the CPU frequency notifier list.
1800 * This function may sleep, and has the same return conditions as
1801 * blocking_notifier_chain_unregister.
1803 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1807 if (cpufreq_disabled())
1811 case CPUFREQ_TRANSITION_NOTIFIER:
1812 ret = srcu_notifier_chain_unregister(
1813 &cpufreq_transition_notifier_list, nb);
1815 case CPUFREQ_POLICY_NOTIFIER:
1816 ret = blocking_notifier_chain_unregister(
1817 &cpufreq_policy_notifier_list, nb);
1825 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1828 /*********************************************************************
1830 *********************************************************************/
1832 /* Must set freqs->new to intermediate frequency */
1833 static int __target_intermediate(struct cpufreq_policy *policy,
1834 struct cpufreq_freqs *freqs, int index)
1838 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1840 /* We don't need to switch to intermediate freq */
1844 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1845 __func__, policy->cpu, freqs->old, freqs->new);
1847 cpufreq_freq_transition_begin(policy, freqs);
1848 ret = cpufreq_driver->target_intermediate(policy, index);
1849 cpufreq_freq_transition_end(policy, freqs, ret);
1852 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1858 static int __target_index(struct cpufreq_policy *policy,
1859 struct cpufreq_frequency_table *freq_table, int index)
1861 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1862 unsigned int intermediate_freq = 0;
1863 int retval = -EINVAL;
1866 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1868 /* Handle switching to intermediate frequency */
1869 if (cpufreq_driver->get_intermediate) {
1870 retval = __target_intermediate(policy, &freqs, index);
1874 intermediate_freq = freqs.new;
1875 /* Set old freq to intermediate */
1876 if (intermediate_freq)
1877 freqs.old = freqs.new;
1880 freqs.new = freq_table[index].frequency;
1881 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1882 __func__, policy->cpu, freqs.old, freqs.new);
1884 cpufreq_freq_transition_begin(policy, &freqs);
1887 retval = cpufreq_driver->target_index(policy, index);
1889 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1893 cpufreq_freq_transition_end(policy, &freqs, retval);
1896 * Failed after setting to intermediate freq? Driver should have
1897 * reverted back to initial frequency and so should we. Check
1898 * here for intermediate_freq instead of get_intermediate, in
1899 * case we haven't switched to intermediate freq at all.
1901 if (unlikely(retval && intermediate_freq)) {
1902 freqs.old = intermediate_freq;
1903 freqs.new = policy->restore_freq;
1904 cpufreq_freq_transition_begin(policy, &freqs);
1905 cpufreq_freq_transition_end(policy, &freqs, 0);
1912 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1913 unsigned int target_freq,
1914 unsigned int relation)
1916 unsigned int old_target_freq = target_freq;
1917 int retval = -EINVAL;
1919 if (cpufreq_disabled())
1922 /* Make sure that target_freq is within supported range */
1923 if (target_freq > policy->max)
1924 target_freq = policy->max;
1925 if (target_freq < policy->min)
1926 target_freq = policy->min;
1928 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1929 policy->cpu, target_freq, relation, old_target_freq);
1932 * This might look like a redundant call as we are checking it again
1933 * after finding index. But it is left intentionally for cases where
1934 * exactly the same freq is requested again and so we can save on a few function
1937 if (target_freq == policy->cur)
1940 /* Save last value to restore later on errors */
1941 policy->restore_freq = policy->cur;
1943 if (cpufreq_driver->target)
1944 retval = cpufreq_driver->target(policy, target_freq, relation);
1945 else if (cpufreq_driver->target_index) {
1946 struct cpufreq_frequency_table *freq_table;
1949 freq_table = cpufreq_frequency_get_table(policy->cpu);
1950 if (unlikely(!freq_table)) {
1951 pr_err("%s: Unable to find freq_table\n", __func__);
1955 retval = cpufreq_frequency_table_target(policy, freq_table,
1956 target_freq, relation, &index);
1957 if (unlikely(retval)) {
1958 pr_err("%s: Unable to find matching freq\n", __func__);
1962 if (freq_table[index].frequency == policy->cur) {
1967 retval = __target_index(policy, freq_table, index);
1973 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1975 int cpufreq_driver_target(struct cpufreq_policy *policy,
1976 unsigned int target_freq,
1977 unsigned int relation)
1981 down_write(&policy->rwsem);
1983 ret = __cpufreq_driver_target(policy, target_freq, relation);
1985 up_write(&policy->rwsem);
1989 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1991 static int __cpufreq_governor(struct cpufreq_policy *policy,
1996 /* Must only be defined when the default governor is known to have latency
1997 restrictions, like e.g. conservative or ondemand.
1998 That this is the case is already ensured in Kconfig
2000 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2001 struct cpufreq_governor *gov = &cpufreq_gov_performance;
2003 struct cpufreq_governor *gov = NULL;
2006 /* Don't start any governor operations if we are entering suspend */
2007 if (cpufreq_suspended)
2010 * The governor might not be initialized here if an ACPI _PPC change
2011 * notification happened, so check for it.
2013 if (!policy->governor)
2016 if (policy->governor->max_transition_latency &&
2017 policy->cpuinfo.transition_latency >
2018 policy->governor->max_transition_latency) {
2022 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2023 policy->governor->name, gov->name);
2024 policy->governor = gov;
2028 if (event == CPUFREQ_GOV_POLICY_INIT)
2029 if (!try_module_get(policy->governor->owner))
2032 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2033 policy->cpu, event);
2035 mutex_lock(&cpufreq_governor_lock);
2036 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2037 || (!policy->governor_enabled
2038 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2039 mutex_unlock(&cpufreq_governor_lock);
2043 if (event == CPUFREQ_GOV_STOP)
2044 policy->governor_enabled = false;
2045 else if (event == CPUFREQ_GOV_START)
2046 policy->governor_enabled = true;
2048 mutex_unlock(&cpufreq_governor_lock);
2050 ret = policy->governor->governor(policy, event);
2053 if (event == CPUFREQ_GOV_POLICY_INIT)
2054 policy->governor->initialized++;
2055 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2056 policy->governor->initialized--;
2058 /* Restore original values */
2059 mutex_lock(&cpufreq_governor_lock);
2060 if (event == CPUFREQ_GOV_STOP)
2061 policy->governor_enabled = true;
2062 else if (event == CPUFREQ_GOV_START)
2063 policy->governor_enabled = false;
2064 mutex_unlock(&cpufreq_governor_lock);
2067 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2068 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2069 module_put(policy->governor->owner);
2074 int cpufreq_register_governor(struct cpufreq_governor *governor)
2081 if (cpufreq_disabled())
2084 mutex_lock(&cpufreq_governor_mutex);
2086 governor->initialized = 0;
2088 if (!find_governor(governor->name)) {
2090 list_add(&governor->governor_list, &cpufreq_governor_list);
2093 mutex_unlock(&cpufreq_governor_mutex);
2096 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2098 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2105 if (cpufreq_disabled())
2108 for_each_present_cpu(cpu) {
2109 if (cpu_online(cpu))
2111 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2112 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2115 mutex_lock(&cpufreq_governor_mutex);
2116 list_del(&governor->governor_list);
2117 mutex_unlock(&cpufreq_governor_mutex);
2120 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
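/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the skeleton of a governor as it would be registered with the calls
 * above. The handler mirrors what the performance governor does; all
 * names are hypothetical.
 */
static int my_gov_handler(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* always run at the policy's maximum frequency */
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor my_governor = {
	.name		= "my_gov",
	.governor	= my_gov_handler,
	.owner		= THIS_MODULE,
};

/*
 * Module init/exit would call cpufreq_register_governor(&my_governor) and
 * cpufreq_unregister_governor(&my_governor) respectively.
 */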
2123 /*********************************************************************
2124 * POLICY INTERFACE *
2125 *********************************************************************/
2128 * cpufreq_get_policy - get the current cpufreq_policy
2129 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2132 * Reads the current cpufreq policy.
2134 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2136 struct cpufreq_policy *cpu_policy;
2140 cpu_policy = cpufreq_cpu_get(cpu);
2144 memcpy(policy, cpu_policy, sizeof(*policy));
2146 cpufreq_cpu_put(cpu_policy);
2149 EXPORT_SYMBOL(cpufreq_get_policy);
2152 * policy : current policy.
2153 * new_policy: policy to be set.
2155 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2156 struct cpufreq_policy *new_policy)
2158 struct cpufreq_governor *old_gov;
2161 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2162 new_policy->cpu, new_policy->min, new_policy->max);
2164 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2166 if (new_policy->min > policy->max || new_policy->max < policy->min)
2169 /* verify the cpu speed can be set within this limit */
2170 ret = cpufreq_driver->verify(new_policy);
2174 /* adjust if necessary - all reasons */
2175 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2176 CPUFREQ_ADJUST, new_policy);
2178 /* adjust if necessary - hardware incompatibility */
2179 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2180 CPUFREQ_INCOMPATIBLE, new_policy);
2183 * verify the cpu speed can be set within this limit, which might be
2184 * different from the first one
2186 ret = cpufreq_driver->verify(new_policy);
2190 /* notification of the new policy */
2191 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2192 CPUFREQ_NOTIFY, new_policy);
2194 policy->min = new_policy->min;
2195 policy->max = new_policy->max;
2197 pr_debug("new min and max freqs are %u - %u kHz\n",
2198 policy->min, policy->max);
2200 if (cpufreq_driver->setpolicy) {
2201 policy->policy = new_policy->policy;
2202 pr_debug("setting range\n");
2203 return cpufreq_driver->setpolicy(new_policy);
2206 if (new_policy->governor == policy->governor)
2209 pr_debug("governor switch\n");
2211 /* save old, working values */
2212 old_gov = policy->governor;
2213 /* end old governor */
2215 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2216 up_write(&policy->rwsem);
2217 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2218 down_write(&policy->rwsem);
2221 /* start new governor */
2222 policy->governor = new_policy->governor;
2223 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2224 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2227 up_write(&policy->rwsem);
2228 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2229 down_write(&policy->rwsem);
2232 /* new governor failed, so re-start old one */
2233 pr_debug("starting governor %s failed\n", policy->governor->name);
2235 policy->governor = old_gov;
2236 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2237 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2243 pr_debug("governor: change or update limits\n");
2244 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2248 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2249 * @cpu: CPU which shall be re-evaluated
2251 * Useful for policy notifiers which have different requirements
2252 * at different times.
2254 int cpufreq_update_policy(unsigned int cpu)
2256 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2257 struct cpufreq_policy new_policy;
2263 down_write(&policy->rwsem);
2265 pr_debug("updating policy for CPU %u\n", cpu);
2266 memcpy(&new_policy, policy, sizeof(*policy));
2267 new_policy.min = policy->user_policy.min;
2268 new_policy.max = policy->user_policy.max;
2269 new_policy.policy = policy->user_policy.policy;
2270 new_policy.governor = policy->user_policy.governor;
2273 * BIOS might change freq behind our back
2274 * -> ask driver for current freq and notify governors about a change
2276 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2277 new_policy.cur = cpufreq_driver->get(cpu);
2278 if (WARN_ON(!new_policy.cur)) {
2284 pr_debug("Driver did not initialize current freq\n");
2285 policy->cur = new_policy.cur;
2287 if (policy->cur != new_policy.cur && has_target())
2288 cpufreq_out_of_sync(policy, new_policy.cur);
2292 ret = cpufreq_set_policy(policy, &new_policy);
2295 up_write(&policy->rwsem);
2297 cpufreq_cpu_put(policy);
2300 EXPORT_SYMBOL(cpufreq_update_policy);
2302 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2303 unsigned long action, void *hcpu)
2305 unsigned int cpu = (unsigned long)hcpu;
2308 dev = get_cpu_device(cpu);
2310 switch (action & ~CPU_TASKS_FROZEN) {
2312 __cpufreq_add_dev(dev, NULL);
2315 case CPU_DOWN_PREPARE:
2316 __cpufreq_remove_dev_prepare(dev, NULL);
2320 __cpufreq_remove_dev_finish(dev, NULL);
2323 case CPU_DOWN_FAILED:
2324 __cpufreq_add_dev(dev, NULL);
2331 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2332 .notifier_call = cpufreq_cpu_callback,
2335 /*********************************************************************
2337 *********************************************************************/
2338 static int cpufreq_boost_set_sw(int state)
2340 struct cpufreq_frequency_table *freq_table;
2341 struct cpufreq_policy *policy;
2344 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2345 freq_table = cpufreq_frequency_get_table(policy->cpu);
2347 ret = cpufreq_frequency_table_cpuinfo(policy,
2350 pr_err("%s: Policy frequency update failed\n",
2354 policy->user_policy.max = policy->max;
2355 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2362 int cpufreq_boost_trigger_state(int state)
2364 unsigned long flags;
2367 if (cpufreq_driver->boost_enabled == state)
2370 write_lock_irqsave(&cpufreq_driver_lock, flags);
2371 cpufreq_driver->boost_enabled = state;
2372 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2374 ret = cpufreq_driver->set_boost(state);
2376 write_lock_irqsave(&cpufreq_driver_lock, flags);
2377 cpufreq_driver->boost_enabled = !state;
2378 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2380 pr_err("%s: Cannot %s BOOST\n",
2381 __func__, state ? "enable" : "disable");
2387 int cpufreq_boost_supported(void)
2389 if (likely(cpufreq_driver))
2390 return cpufreq_driver->boost_supported;
2394 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2396 int cpufreq_boost_enabled(void)
2398 return cpufreq_driver->boost_enabled;
2400 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2402 /*********************************************************************
2403 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2404 *********************************************************************/
2407 * cpufreq_register_driver - register a CPU Frequency driver
2408 * @driver_data: A struct cpufreq_driver containing the values
2409 * submitted by the CPU Frequency driver.
2411 * Registers a CPU Frequency driver to this core code. This code
2412 * returns zero on success, -EBUSY when another driver got here first
2413 * (and isn't unregistered in the meantime).
2416 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2418 unsigned long flags;
2421 if (cpufreq_disabled())
2424 if (!driver_data || !driver_data->verify || !driver_data->init ||
2425 !(driver_data->setpolicy || driver_data->target_index ||
2426 driver_data->target) ||
2427 (driver_data->setpolicy && (driver_data->target_index ||
2428 driver_data->target)) ||
2429 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2432 pr_debug("trying to register driver %s\n", driver_data->name);
2434 write_lock_irqsave(&cpufreq_driver_lock, flags);
2435 if (cpufreq_driver) {
2436 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2439 cpufreq_driver = driver_data;
2440 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2442 if (driver_data->setpolicy)
2443 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2445 if (cpufreq_boost_supported()) {
2447 * Check if driver provides function to enable boost -
2448 * if not, use cpufreq_boost_set_sw as default
2450 if (!cpufreq_driver->set_boost)
2451 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2453 ret = cpufreq_sysfs_create_file(&boost.attr);
2455 pr_err("%s: cannot register global BOOST sysfs file\n",
2457 goto err_null_driver;
2461 ret = subsys_interface_register(&cpufreq_interface);
2463 goto err_boost_unreg;
2465 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2469 /* check for at least one working CPU */
2470 for (i = 0; i < nr_cpu_ids; i++)
2471 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2476 /* if all ->init() calls failed, unregister */
2478 pr_debug("no CPU initialized for driver %s\n",
2484 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2485 pr_debug("driver %s up and running\n", driver_data->name);
2489 subsys_interface_unregister(&cpufreq_interface);
2491 if (cpufreq_boost_supported())
2492 cpufreq_sysfs_remove_file(&boost.attr);
2494 write_lock_irqsave(&cpufreq_driver_lock, flags);
2495 cpufreq_driver = NULL;
2496 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2499 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
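/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the minimal set of callbacks a table-based driver passes to
 * cpufreq_register_driver(). "my_cpufreq_init" and "my_target_index" are
 * assumed driver callbacks; the generic helpers are real library functions.
 */
static int my_cpufreq_init(struct cpufreq_policy *policy);
static int my_target_index(struct cpufreq_policy *policy, unsigned int index);

static struct cpufreq_driver my_cpufreq_driver = {
	.name		= "my-cpufreq",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init		= my_cpufreq_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= my_target_index,
	.get		= cpufreq_generic_get,
	.attr		= cpufreq_generic_attr,
};

/*
 * Module init would then just do:
 *	return cpufreq_register_driver(&my_cpufreq_driver);
 */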
2502 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2504 * Unregister the current CPUFreq driver. Only call this if you have
2505 * the right to do so, i.e. if you have succeeded in initialising before!
2506 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2507 * currently not initialised.
2509 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2511 unsigned long flags;
2513 if (!cpufreq_driver || (driver != cpufreq_driver))
2516 pr_debug("unregistering driver %s\n", driver->name);
2518 subsys_interface_unregister(&cpufreq_interface);
2519 if (cpufreq_boost_supported())
2520 cpufreq_sysfs_remove_file(&boost.attr);
2522 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2524 down_write(&cpufreq_rwsem);
2525 write_lock_irqsave(&cpufreq_driver_lock, flags);
2527 cpufreq_driver = NULL;
2529 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2530 up_write(&cpufreq_rwsem);
2534 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2537 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2538 * or mutexes when secondary CPUs are halted.
2540 static struct syscore_ops cpufreq_syscore_ops = {
2541 .shutdown = cpufreq_suspend,
2544 static int __init cpufreq_core_init(void)
2546 if (cpufreq_disabled())
2549 cpufreq_global_kobject = kobject_create();
2550 BUG_ON(!cpufreq_global_kobject);
2552 register_syscore_ops(&cpufreq_syscore_ops);
2556 core_initcall(cpufreq_core_init);