2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
34 static LIST_HEAD(cpufreq_policy_list);
36 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
38 return cpumask_empty(policy->cpus);
41 /* Macros to iterate over CPU policies */
42 #define for_each_suitable_policy(__policy, __active) \
43 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
44 if ((__active) == !policy_is_inactive(__policy))
46 #define for_each_active_policy(__policy) \
47 for_each_suitable_policy(__policy, true)
48 #define for_each_inactive_policy(__policy) \
49 for_each_suitable_policy(__policy, false)
51 #define for_each_policy(__policy) \
52 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
54 /* Iterate over governors */
55 static LIST_HEAD(cpufreq_governor_list);
56 #define for_each_governor(__governor) \
57 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
60 * The "cpufreq driver" - the arch- or hardware-dependent low
61 * level driver of CPUFreq support, and its spinlock. This lock
62 * also protects the cpufreq_cpu_data array.
64 static struct cpufreq_driver *cpufreq_driver;
65 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
66 static DEFINE_RWLOCK(cpufreq_driver_lock);
68 /* Flag to suspend/resume CPUFreq governors */
69 static bool cpufreq_suspended;
71 static inline bool has_target(void)
73 return cpufreq_driver->target_index || cpufreq_driver->target;
76 /* internal prototypes */
77 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
78 static int cpufreq_init_governor(struct cpufreq_policy *policy);
79 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
80 static int cpufreq_start_governor(struct cpufreq_policy *policy);
81 static void cpufreq_stop_governor(struct cpufreq_policy *policy);
82 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
85 * Two notifier lists: the "policy" list is involved in the
86 * validation process for a new CPU frequency policy; the
87 * "transition" list for kernel code that needs to handle
88 * changes to devices when the CPU clock speed changes.
89 * The mutex locks both lists.
91 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
92 SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
94 static int off __read_mostly;
95 static int cpufreq_disabled(void)
99 void disable_cpufreq(void)
103 static DEFINE_MUTEX(cpufreq_governor_mutex);
105 bool have_governor_per_policy(void)
107 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
109 EXPORT_SYMBOL_GPL(have_governor_per_policy);
111 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
113 if (have_governor_per_policy())
114 return &policy->kobj;
116 return cpufreq_global_kobject;
118 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
120 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
126 cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
128 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
129 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
130 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
131 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
132 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
133 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
135 idle_time = cur_wall_time - busy_time;
137 *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
139 return div_u64(idle_time, NSEC_PER_USEC);
142 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
144 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
146 if (idle_time == -1ULL)
147 return get_cpu_idle_time_jiffy(cpu, wall);
149 idle_time += get_cpu_iowait_time_us(cpu, wall);
153 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
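/*
 * Usage sketch (illustrative, not part of this file): sampling governors
 * derive CPU load from two successive readings of the helper above.
 * "prev_idle" and "prev_wall" stand for hypothetical per-CPU bookkeeping
 * kept by the caller:
 *
 *	u64 wall, idle, wall_delta, idle_delta;
 *	unsigned int load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 1);
 *	wall_delta = wall - prev_wall;
 *	idle_delta = idle - prev_idle;
 *	prev_wall = wall;
 *	prev_idle = idle;
 *	load = wall_delta ?
 *		div64_u64(100 * (wall_delta - idle_delta), wall_delta) : 0;
 */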
155 __weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
156 unsigned long max_freq)
159 EXPORT_SYMBOL_GPL(arch_set_freq_scale);
162 * This is a generic cpufreq init() routine which can be used by cpufreq
163 * drivers of SMP systems. It will do the following:
164 * - validate & show the frequency table passed in
165 * - set the policy's transition latency
166 * - fill policy->cpus with all possible CPUs
168 int cpufreq_generic_init(struct cpufreq_policy *policy,
169 struct cpufreq_frequency_table *table,
170 unsigned int transition_latency)
172 policy->freq_table = table;
173 policy->cpuinfo.transition_latency = transition_latency;
176 * The driver only supports the SMP configuration where all processors
177 * share the clock and voltage.
179 cpumask_setall(policy->cpus);
183 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
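/*
 * Usage sketch (illustrative): a driver whose CPUs all share one clock can
 * build its ->init() callback around the helper above and then reuse
 * cpufreq_generic_get() (below) as its ->get() callback. "foo_freq_table"
 * is a hypothetical table owned by such a driver and 100000 ns is an
 * assumed transition latency:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = clk_get(get_cpu_device(policy->cpu), NULL);
 *		if (IS_ERR(policy->clk))
 *			return PTR_ERR(policy->clk);
 *
 *		return cpufreq_generic_init(policy, foo_freq_table, 100000);
 *	}
 */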
185 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
187 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
189 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
191 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
193 unsigned int cpufreq_generic_get(unsigned int cpu)
195 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
197 if (!policy || IS_ERR(policy->clk)) {
198 pr_err("%s: No %s associated to cpu: %d\n",
199 __func__, policy ? "clk" : "policy", cpu);
203 return clk_get_rate(policy->clk) / 1000;
205 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
208 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
210 * @cpu: cpu to find policy for.
212 * This returns the policy for 'cpu', or NULL if it doesn't exist.
213 * It also increments the kobject reference count to mark the policy busy,
214 * so a corresponding call to cpufreq_cpu_put() is required to drop it again.
215 * If that cpufreq_cpu_put() call is never made, the policy will never be
216 * freed, as freeing depends on the kobject count.
218 * Return: A valid policy on success, otherwise NULL on failure.
220 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
222 struct cpufreq_policy *policy = NULL;
225 if (WARN_ON(cpu >= nr_cpu_ids))
228 /* get the cpufreq driver */
229 read_lock_irqsave(&cpufreq_driver_lock, flags);
231 if (cpufreq_driver) {
233 policy = cpufreq_cpu_get_raw(cpu);
235 kobject_get(&policy->kobj);
238 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
242 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
245 * cpufreq_cpu_put: Decrements the usage count of a policy
247 * @policy: policy earlier returned by cpufreq_cpu_get().
249 * This decrements the kobject reference count incremented earlier by calling
250 * cpufreq_cpu_get().
252 void cpufreq_cpu_put(struct cpufreq_policy *policy)
254 kobject_put(&policy->kobj);
256 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
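/*
 * Usage sketch (illustrative): every successful cpufreq_cpu_get() must be
 * balanced by cpufreq_cpu_put() once the caller is done with the policy:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("CPU%u is currently at %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */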
258 /*********************************************************************
259 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
260 *********************************************************************/
263 * adjust_jiffies - adjust the system "loops_per_jiffy"
265 * This function alters the system "loops_per_jiffy" for the clock
266 * speed change. Note that loops_per_jiffy cannot be updated on SMP
267 * systems as each CPU might be scaled differently. So, use the arch
268 * per-CPU loops_per_jiffy value wherever possible.
270 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
273 static unsigned long l_p_j_ref;
274 static unsigned int l_p_j_ref_freq;
276 if (ci->flags & CPUFREQ_CONST_LOOPS)
279 if (!l_p_j_ref_freq) {
280 l_p_j_ref = loops_per_jiffy;
281 l_p_j_ref_freq = ci->old;
282 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
283 l_p_j_ref, l_p_j_ref_freq);
285 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
286 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
288 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
289 loops_per_jiffy, ci->new);
295 * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
296 * @policy: cpufreq policy the frequency transition applies to.
297 * @freqs: contain details of the frequency update.
298 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
300 * This function calls the transition notifiers and the "adjust_jiffies"
301 * function. It is called twice on all CPU frequency changes that have
302 * external effects.
304 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
305 struct cpufreq_freqs *freqs,
308 BUG_ON(irqs_disabled());
310 if (cpufreq_disabled())
313 freqs->flags = cpufreq_driver->flags;
314 pr_debug("notification %u of frequency transition to %u kHz\n",
318 case CPUFREQ_PRECHANGE:
320 * Detect if the driver reported a value as "old frequency"
321 * which is not equal to what the cpufreq core thinks is
324 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
325 if (policy->cur && (policy->cur != freqs->old)) {
326 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
327 freqs->old, policy->cur);
328 freqs->old = policy->cur;
332 for_each_cpu(freqs->cpu, policy->cpus) {
333 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
334 CPUFREQ_PRECHANGE, freqs);
337 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
340 case CPUFREQ_POSTCHANGE:
341 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
342 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
343 cpumask_pr_args(policy->cpus));
345 for_each_cpu(freqs->cpu, policy->cpus) {
346 trace_cpu_frequency(freqs->new, freqs->cpu);
347 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
348 CPUFREQ_POSTCHANGE, freqs);
351 cpufreq_stats_record_transition(policy, freqs->new);
352 policy->cur = freqs->new;
356 /* Send POSTCHANGE notifications even when the transition may have failed */
357 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
358 struct cpufreq_freqs *freqs, int transition_failed)
360 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
361 if (!transition_failed)
364 swap(freqs->old, freqs->new);
365 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
366 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
369 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
370 struct cpufreq_freqs *freqs)
374 * Catch double invocations of _begin() which lead to self-deadlock.
375 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
376 * doesn't invoke _begin() on their behalf, and hence the chances of
377 * double invocations are very low. Moreover, there are scenarios
378 * where these checks can emit false-positive warnings in these
379 * drivers; so we avoid that by skipping them altogether.
381 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
382 && current == policy->transition_task);
385 wait_event(policy->transition_wait, !policy->transition_ongoing);
387 spin_lock(&policy->transition_lock);
389 if (unlikely(policy->transition_ongoing)) {
390 spin_unlock(&policy->transition_lock);
394 policy->transition_ongoing = true;
395 policy->transition_task = current;
397 spin_unlock(&policy->transition_lock);
399 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
401 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
403 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
404 struct cpufreq_freqs *freqs, int transition_failed)
406 if (WARN_ON(!policy->transition_ongoing))
409 cpufreq_notify_post_transition(policy, freqs, transition_failed);
411 policy->transition_ongoing = false;
412 policy->transition_task = NULL;
414 wake_up(&policy->transition_wait);
416 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
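/*
 * Usage sketch (illustrative): a driver doing its own notification (e.g. one
 * implementing ->target() directly) brackets the hardware reprogramming with
 * the two helpers above. "foo_set_rate()" is a hypothetical hardware hook:
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target_freq };
 *	int err;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	err = foo_set_rate(policy, target_freq);
 *	cpufreq_freq_transition_end(policy, &freqs, err);
 */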
419 * Fast frequency switching status count. Positive means "enabled", negative
420 * means "disabled" and 0 means "not decided yet".
422 static int cpufreq_fast_switch_count;
423 static DEFINE_MUTEX(cpufreq_fast_switch_lock);
425 static void cpufreq_list_transition_notifiers(void)
427 struct notifier_block *nb;
429 pr_info("Registered transition notifiers:\n");
431 mutex_lock(&cpufreq_transition_notifier_list.mutex);
433 for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
434 pr_info("%pF\n", nb->notifier_call);
436 mutex_unlock(&cpufreq_transition_notifier_list.mutex);
440 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
441 * @policy: cpufreq policy to enable fast frequency switching for.
443 * Try to enable fast frequency switching for @policy.
445 * The attempt will fail if there is at least one transition notifier registered
446 * at this point, as fast frequency switching is quite fundamentally at odds
447 * with transition notifiers. Thus if successful, it will make registration of
448 * transition notifiers fail going forward.
450 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
452 lockdep_assert_held(&policy->rwsem);
454 if (!policy->fast_switch_possible)
457 mutex_lock(&cpufreq_fast_switch_lock);
458 if (cpufreq_fast_switch_count >= 0) {
459 cpufreq_fast_switch_count++;
460 policy->fast_switch_enabled = true;
462 pr_warn("CPU%u: Fast frequency switching not enabled\n",
464 cpufreq_list_transition_notifiers();
466 mutex_unlock(&cpufreq_fast_switch_lock);
468 EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
471 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
472 * @policy: cpufreq policy to disable fast frequency switching for.
474 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
476 mutex_lock(&cpufreq_fast_switch_lock);
477 if (policy->fast_switch_enabled) {
478 policy->fast_switch_enabled = false;
479 if (!WARN_ON(cpufreq_fast_switch_count <= 0))
480 cpufreq_fast_switch_count--;
482 mutex_unlock(&cpufreq_fast_switch_lock);
484 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
487 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
488 * frequency.
489 * @target_freq: target frequency to resolve.
491 * The target to driver frequency mapping is cached in the policy.
493 * Return: Lowest driver-supported frequency greater than or equal to the
494 * given target_freq, subject to policy (min/max) and driver limitations.
496 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
497 unsigned int target_freq)
499 target_freq = clamp_val(target_freq, policy->min, policy->max);
500 policy->cached_target_freq = target_freq;
502 if (cpufreq_driver->target_index) {
505 idx = cpufreq_frequency_table_target(policy, target_freq,
507 policy->cached_resolved_idx = idx;
508 return policy->freq_table[idx].frequency;
511 if (cpufreq_driver->resolve_freq)
512 return cpufreq_driver->resolve_freq(policy, target_freq);
516 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
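/*
 * Usage sketch (illustrative): a governor typically resolves its raw request
 * first, so that successive requests mapping to the same table entry can be
 * filtered out cheaply before asking the driver to switch. "raw_freq" and
 * "last_freq" stand for hypothetical governor-local values:
 *
 *	next_freq = cpufreq_driver_resolve_freq(policy, raw_freq);
 *	if (next_freq != last_freq) {
 *		last_freq = next_freq;
 *		__cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
 *	}
 *
 * (__cpufreq_driver_target() must be called with policy->rwsem held.)
 */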
518 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
520 unsigned int latency;
522 if (policy->transition_delay_us)
523 return policy->transition_delay_us;
525 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
528 * For platforms that can change the frequency very fast (< 10
529 * us), the above formula gives a decent transition delay. But
530 * for platforms where transition_latency is in milliseconds, it
531 * ends up giving unrealistic values.
533 * Cap the default transition delay to 10 ms, which seems to be
534 * a reasonable amount of time after which we should reevaluate
535 * the frequency.
537 return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
540 return LATENCY_MULTIPLIER;
542 EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
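/*
 * Worked example (illustrative, assuming LATENCY_MULTIPLIER is 1000): a
 * driver reporting a 2 us transition latency gets a 2 us * 1000 = 2000 us
 * (2 ms) default delay, whereas one reporting 500 us would compute
 * 500000 us and is therefore capped to the 10 ms limit above.
 */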
544 /*********************************************************************
545 *                          SYSFS INTERFACE                          *
546 *********************************************************************/
547 static ssize_t show_boost(struct kobject *kobj,
548 struct attribute *attr, char *buf)
550 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
553 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
554 const char *buf, size_t count)
558 ret = sscanf(buf, "%d", &enable);
559 if (ret != 1 || enable < 0 || enable > 1)
562 if (cpufreq_boost_trigger_state(enable)) {
563 pr_err("%s: Cannot %s BOOST!\n",
564 __func__, enable ? "enable" : "disable");
568 pr_debug("%s: cpufreq BOOST %s\n",
569 __func__, enable ? "enabled" : "disabled");
573 define_one_global_rw(boost);
575 static struct cpufreq_governor *find_governor(const char *str_governor)
577 struct cpufreq_governor *t;
580 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
587 * cpufreq_parse_governor - parse a governor string
589 static int cpufreq_parse_governor(char *str_governor,
590 struct cpufreq_policy *policy)
592 if (cpufreq_driver->setpolicy) {
593 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
594 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
598 if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
599 policy->policy = CPUFREQ_POLICY_POWERSAVE;
603 struct cpufreq_governor *t;
605 mutex_lock(&cpufreq_governor_mutex);
607 t = find_governor(str_governor);
611 mutex_unlock(&cpufreq_governor_mutex);
613 ret = request_module("cpufreq_%s", str_governor);
617 mutex_lock(&cpufreq_governor_mutex);
619 t = find_governor(str_governor);
621 if (t && !try_module_get(t->owner))
624 mutex_unlock(&cpufreq_governor_mutex);
627 policy->governor = t;
636 * cpufreq_per_cpu_attr_read() / show_##file_name() -
637 * print out cpufreq information
639 * Write out information from cpufreq_driver->policy[cpu]; object must be
640 * "unsigned int".
643 #define show_one(file_name, object) \
644 static ssize_t show_##file_name \
645 (struct cpufreq_policy *policy, char *buf) \
647 return sprintf(buf, "%u\n", policy->object); \
650 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
651 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
652 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
653 show_one(scaling_min_freq, min);
654 show_one(scaling_max_freq, max);
656 __weak unsigned int arch_freq_get_on_cpu(int cpu)
661 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
666 freq = arch_freq_get_on_cpu(policy->cpu);
668 ret = sprintf(buf, "%u\n", freq);
669 else if (cpufreq_driver && cpufreq_driver->setpolicy &&
671 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
673 ret = sprintf(buf, "%u\n", policy->cur);
677 static int cpufreq_set_policy(struct cpufreq_policy *policy,
678 struct cpufreq_policy *new_policy);
681 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
683 #define store_one(file_name, object) \
684 static ssize_t store_##file_name \
685 (struct cpufreq_policy *policy, const char *buf, size_t count) \
688 struct cpufreq_policy new_policy; \
690 memcpy(&new_policy, policy, sizeof(*policy)); \
691 new_policy.min = policy->user_policy.min; \
692 new_policy.max = policy->user_policy.max; \
694 ret = sscanf(buf, "%u", &new_policy.object); \
698 temp = new_policy.object; \
699 ret = cpufreq_set_policy(policy, &new_policy); \
701 policy->user_policy.object = temp; \
703 return ret ? ret : count; \
706 store_one(scaling_min_freq, min);
707 store_one(scaling_max_freq, max);
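/*
 * Expansion sketch (illustrative): store_one(scaling_max_freq, max) above
 * produces store_scaling_max_freq(), which copies the current policy into
 * new_policy, resets new_policy.min/max from the user_policy values, parses
 * the written string into new_policy.max, applies it via
 * cpufreq_set_policy(), and records the value in policy->user_policy.max
 * when that succeeds.
 */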
710 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
712 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
715 unsigned int cur_freq = __cpufreq_get(policy);
718 return sprintf(buf, "%u\n", cur_freq);
720 return sprintf(buf, "<unknown>\n");
724 * show_scaling_governor - show the current policy for the specified CPU
726 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
728 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
729 return sprintf(buf, "powersave\n");
730 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
731 return sprintf(buf, "performance\n");
732 else if (policy->governor)
733 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
734 policy->governor->name);
739 * store_scaling_governor - store policy for the specified CPU
741 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
742 const char *buf, size_t count)
745 char str_governor[16];
746 struct cpufreq_policy new_policy;
748 memcpy(&new_policy, policy, sizeof(*policy));
750 ret = sscanf(buf, "%15s", str_governor);
754 if (cpufreq_parse_governor(str_governor, &new_policy))
757 ret = cpufreq_set_policy(policy, &new_policy);
759 if (new_policy.governor)
760 module_put(new_policy.governor->owner);
762 return ret ? ret : count;
766 * show_scaling_driver - show the cpufreq driver currently loaded
768 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
770 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
774 * show_scaling_available_governors - show the available CPUfreq governors
776 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
780 struct cpufreq_governor *t;
783 i += sprintf(buf, "performance powersave");
787 for_each_governor(t) {
788 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
789 - (CPUFREQ_NAME_LEN + 2)))
791 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
794 i += sprintf(&buf[i], "\n");
798 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
803 for_each_cpu(cpu, mask) {
805 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
806 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
807 if (i >= (PAGE_SIZE - 5))
810 i += sprintf(&buf[i], "\n");
813 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
816 * show_related_cpus - show the CPUs affected by each transition even if
817 * hw coordination is in use
819 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
821 return cpufreq_show_cpus(policy->related_cpus, buf);
825 * show_affected_cpus - show the CPUs affected by each transition
827 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
829 return cpufreq_show_cpus(policy->cpus, buf);
832 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
833 const char *buf, size_t count)
835 unsigned int freq = 0;
838 if (!policy->governor || !policy->governor->store_setspeed)
841 ret = sscanf(buf, "%u", &freq);
845 policy->governor->store_setspeed(policy, freq);
850 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
852 if (!policy->governor || !policy->governor->show_setspeed)
853 return sprintf(buf, "<unsupported>\n");
855 return policy->governor->show_setspeed(policy, buf);
859 * show_bios_limit - show the current cpufreq HW/BIOS limitation
861 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
865 if (cpufreq_driver->bios_limit) {
866 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
868 return sprintf(buf, "%u\n", limit);
870 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
873 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
874 cpufreq_freq_attr_ro(cpuinfo_min_freq);
875 cpufreq_freq_attr_ro(cpuinfo_max_freq);
876 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
877 cpufreq_freq_attr_ro(scaling_available_governors);
878 cpufreq_freq_attr_ro(scaling_driver);
879 cpufreq_freq_attr_ro(scaling_cur_freq);
880 cpufreq_freq_attr_ro(bios_limit);
881 cpufreq_freq_attr_ro(related_cpus);
882 cpufreq_freq_attr_ro(affected_cpus);
883 cpufreq_freq_attr_rw(scaling_min_freq);
884 cpufreq_freq_attr_rw(scaling_max_freq);
885 cpufreq_freq_attr_rw(scaling_governor);
886 cpufreq_freq_attr_rw(scaling_setspeed);
888 static struct attribute *default_attrs[] = {
889 &cpuinfo_min_freq.attr,
890 &cpuinfo_max_freq.attr,
891 &cpuinfo_transition_latency.attr,
892 &scaling_min_freq.attr,
893 &scaling_max_freq.attr,
896 &scaling_governor.attr,
897 &scaling_driver.attr,
898 &scaling_available_governors.attr,
899 &scaling_setspeed.attr,
903 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
904 #define to_attr(a) container_of(a, struct freq_attr, attr)
906 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
908 struct cpufreq_policy *policy = to_policy(kobj);
909 struct freq_attr *fattr = to_attr(attr);
912 down_read(&policy->rwsem);
913 ret = fattr->show(policy, buf);
914 up_read(&policy->rwsem);
919 static ssize_t store(struct kobject *kobj, struct attribute *attr,
920 const char *buf, size_t count)
922 struct cpufreq_policy *policy = to_policy(kobj);
923 struct freq_attr *fattr = to_attr(attr);
924 ssize_t ret = -EINVAL;
927 * cpus_read_trylock() is used here to work around a circular lock
928 * dependency problem with respect to cpufreq_register_driver().
930 if (!cpus_read_trylock())
933 if (cpu_online(policy->cpu)) {
934 down_write(&policy->rwsem);
935 ret = fattr->store(policy, buf, count);
936 up_write(&policy->rwsem);
944 static void cpufreq_sysfs_release(struct kobject *kobj)
946 struct cpufreq_policy *policy = to_policy(kobj);
947 pr_debug("last reference is dropped\n");
948 complete(&policy->kobj_unregister);
951 static const struct sysfs_ops sysfs_ops = {
956 static struct kobj_type ktype_cpufreq = {
957 .sysfs_ops = &sysfs_ops,
958 .default_attrs = default_attrs,
959 .release = cpufreq_sysfs_release,
962 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
964 struct device *dev = get_cpu_device(cpu);
969 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
972 dev_dbg(dev, "%s: Adding symlink\n", __func__);
973 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
974 dev_err(dev, "cpufreq symlink creation failed\n");
977 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
980 dev_dbg(dev, "%s: Removing symlink\n", __func__);
981 sysfs_remove_link(&dev->kobj, "cpufreq");
984 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
986 struct freq_attr **drv_attr;
989 /* set up files for this cpu device */
990 drv_attr = cpufreq_driver->attr;
991 while (drv_attr && *drv_attr) {
992 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
997 if (cpufreq_driver->get) {
998 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1003 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1007 if (cpufreq_driver->bios_limit) {
1008 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1016 __weak struct cpufreq_governor *cpufreq_default_governor(void)
1021 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1023 struct cpufreq_governor *gov = NULL;
1024 struct cpufreq_policy new_policy;
1026 memcpy(&new_policy, policy, sizeof(*policy));
1028 /* Update governor of new_policy to the governor used before hotplug */
1029 gov = find_governor(policy->last_governor);
1031 pr_debug("Restoring governor %s for cpu %d\n",
1032 policy->governor->name, policy->cpu);
1034 gov = cpufreq_default_governor();
1039 new_policy.governor = gov;
1041 /* Use the default policy if there is no last_policy. */
1042 if (cpufreq_driver->setpolicy) {
1043 if (policy->last_policy)
1044 new_policy.policy = policy->last_policy;
1046 cpufreq_parse_governor(gov->name, &new_policy);
1048 /* set default policy */
1049 return cpufreq_set_policy(policy, &new_policy);
1052 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1056 /* Has this CPU been taken care of already? */
1057 if (cpumask_test_cpu(cpu, policy->cpus))
1060 down_write(&policy->rwsem);
1062 cpufreq_stop_governor(policy);
1064 cpumask_set_cpu(cpu, policy->cpus);
1067 ret = cpufreq_start_governor(policy);
1069 pr_err("%s: Failed to start governor\n", __func__);
1071 up_write(&policy->rwsem);
1075 static void handle_update(struct work_struct *work)
1077 struct cpufreq_policy *policy =
1078 container_of(work, struct cpufreq_policy, update);
1079 unsigned int cpu = policy->cpu;
1080 pr_debug("handle_update for cpu %u called\n", cpu);
1081 cpufreq_update_policy(cpu);
1084 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1086 struct cpufreq_policy *policy;
1089 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1093 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1094 goto err_free_policy;
1096 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1097 goto err_free_cpumask;
1099 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1100 goto err_free_rcpumask;
1102 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1103 cpufreq_global_kobject, "policy%u", cpu);
1105 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1106 goto err_free_real_cpus;
1109 INIT_LIST_HEAD(&policy->policy_list);
1110 init_rwsem(&policy->rwsem);
1111 spin_lock_init(&policy->transition_lock);
1112 init_waitqueue_head(&policy->transition_wait);
1113 init_completion(&policy->kobj_unregister);
1114 INIT_WORK(&policy->update, handle_update);
1120 free_cpumask_var(policy->real_cpus);
1122 free_cpumask_var(policy->related_cpus);
1124 free_cpumask_var(policy->cpus);
1131 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1133 struct kobject *kobj;
1134 struct completion *cmp;
1136 down_write(&policy->rwsem);
1137 cpufreq_stats_free_table(policy);
1138 kobj = &policy->kobj;
1139 cmp = &policy->kobj_unregister;
1140 up_write(&policy->rwsem);
1144 * We need to make sure that the underlying kobj is
1145 * actually not referenced anymore by anybody before we
1146 * proceed with unloading.
1148 pr_debug("waiting for dropping of refcount\n");
1149 wait_for_completion(cmp);
1150 pr_debug("wait complete\n");
1153 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1155 unsigned long flags;
1158 /* Remove policy from list */
1159 write_lock_irqsave(&cpufreq_driver_lock, flags);
1160 list_del(&policy->policy_list);
1162 for_each_cpu(cpu, policy->related_cpus)
1163 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1164 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1166 cpufreq_policy_put_kobj(policy);
1167 free_cpumask_var(policy->real_cpus);
1168 free_cpumask_var(policy->related_cpus);
1169 free_cpumask_var(policy->cpus);
1173 static int cpufreq_online(unsigned int cpu)
1175 struct cpufreq_policy *policy;
1177 unsigned long flags;
1181 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1183 /* Check if this CPU already has a policy to manage it */
1184 policy = per_cpu(cpufreq_cpu_data, cpu);
1186 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1187 if (!policy_is_inactive(policy))
1188 return cpufreq_add_policy_cpu(policy, cpu);
1190 /* This is the only online CPU for the policy. Start over. */
1192 down_write(&policy->rwsem);
1194 policy->governor = NULL;
1195 up_write(&policy->rwsem);
1198 policy = cpufreq_policy_alloc(cpu);
1203 cpumask_copy(policy->cpus, cpumask_of(cpu));
1205 /* call driver. From then on the cpufreq driver must be able
1206 * to accept all calls to ->verify and ->setpolicy for this CPU
1208 ret = cpufreq_driver->init(policy);
1210 pr_debug("initialization failed\n");
1211 goto out_free_policy;
1214 ret = cpufreq_table_validate_and_sort(policy);
1216 goto out_exit_policy;
1218 down_write(&policy->rwsem);
1221 /* related_cpus should at least include policy->cpus. */
1222 cpumask_copy(policy->related_cpus, policy->cpus);
1226 * affected cpus must always be the ones that are online. We aren't
1227 * managing offline cpus here.
1229 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1232 policy->user_policy.min = policy->min;
1233 policy->user_policy.max = policy->max;
1235 for_each_cpu(j, policy->related_cpus) {
1236 per_cpu(cpufreq_cpu_data, j) = policy;
1237 add_cpu_dev_symlink(policy, j);
1240 policy->min = policy->user_policy.min;
1241 policy->max = policy->user_policy.max;
1244 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1245 policy->cur = cpufreq_driver->get(policy->cpu);
1247 pr_err("%s: ->get() failed\n", __func__);
1248 goto out_destroy_policy;
1253 * Sometimes boot loaders set the CPU frequency to a value outside of
1254 * the frequency table known to the cpufreq core. In such cases the CPU
1255 * might be unstable if it has to run at that frequency for a long time,
1256 * so it is better to move it to a frequency that is listed in the
1257 * freq-table. An out-of-table frequency also makes cpufreq stats
1258 * inconsistent, as cpufreq-stats would fail to register because the
1259 * current frequency of the CPU is not found in the freq-table.
1261 * Because we don't want this change to affect the boot process badly, we
1262 * go for the next frequency which is >= policy->cur ('cur' must be set
1263 * by now, otherwise we would end up setting the frequency to the lowest
1264 * entry of the table, as 'cur' is initialized to zero).
1266 * We pass the target frequency as "policy->cur - 1"; otherwise
1267 * __cpufreq_driver_target() would simply bail out, as policy->cur would
1268 * be equal to the target frequency.
1270 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1272 /* Are we running at unknown frequency ? */
1273 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1274 if (ret == -EINVAL) {
1275 /* Warn user and fix it */
1276 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1277 __func__, policy->cpu, policy->cur);
1278 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1279 CPUFREQ_RELATION_L);
1282 * Reaching here within a few seconds after boot does not
1283 * mean that the system will remain stable at the "unknown"
1284 * frequency for any longer duration. Hence, a BUG_ON().
1287 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1288 __func__, policy->cpu, policy->cur);
1293 ret = cpufreq_add_dev_interface(policy);
1295 goto out_destroy_policy;
1297 cpufreq_stats_create_table(policy);
1299 write_lock_irqsave(&cpufreq_driver_lock, flags);
1300 list_add(&policy->policy_list, &cpufreq_policy_list);
1301 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1304 ret = cpufreq_init_policy(policy);
1306 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1307 __func__, cpu, ret);
1308 goto out_destroy_policy;
1311 up_write(&policy->rwsem);
1313 kobject_uevent(&policy->kobj, KOBJ_ADD);
1315 /* Callback for handling stuff after policy is ready */
1316 if (cpufreq_driver->ready)
1317 cpufreq_driver->ready(policy);
1319 pr_debug("initialization complete\n");
1324 for_each_cpu(j, policy->real_cpus)
1325 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1327 up_write(&policy->rwsem);
1330 if (cpufreq_driver->exit)
1331 cpufreq_driver->exit(policy);
1334 cpufreq_policy_free(policy);
1339 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1341 * @sif: Subsystem interface structure pointer (not used)
1343 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1345 struct cpufreq_policy *policy;
1346 unsigned cpu = dev->id;
1349 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1351 if (cpu_online(cpu)) {
1352 ret = cpufreq_online(cpu);
1357 /* Create sysfs link on CPU registration */
1358 policy = per_cpu(cpufreq_cpu_data, cpu);
1360 add_cpu_dev_symlink(policy, cpu);
1365 static int cpufreq_offline(unsigned int cpu)
1367 struct cpufreq_policy *policy;
1370 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1372 policy = cpufreq_cpu_get_raw(cpu);
1374 pr_debug("%s: No cpu_data found\n", __func__);
1378 down_write(&policy->rwsem);
1380 cpufreq_stop_governor(policy);
1382 cpumask_clear_cpu(cpu, policy->cpus);
1384 if (policy_is_inactive(policy)) {
1386 strncpy(policy->last_governor, policy->governor->name,
1389 policy->last_policy = policy->policy;
1390 } else if (cpu == policy->cpu) {
1391 /* Nominate new CPU */
1392 policy->cpu = cpumask_any(policy->cpus);
1395 /* Start governor again for active policy */
1396 if (!policy_is_inactive(policy)) {
1398 ret = cpufreq_start_governor(policy);
1400 pr_err("%s: Failed to start governor\n", __func__);
1406 if (cpufreq_driver->stop_cpu)
1407 cpufreq_driver->stop_cpu(policy);
1410 cpufreq_exit_governor(policy);
1413 * Perform the ->exit() even during light-weight tear-down,
1414 * since this is a core component, and is essential for the
1415 * subsequent light-weight ->init() to succeed.
1417 if (cpufreq_driver->exit) {
1418 cpufreq_driver->exit(policy);
1419 policy->freq_table = NULL;
1423 up_write(&policy->rwsem);
1428 * cpufreq_remove_dev - remove a CPU device
1430 * Removes the cpufreq interface for a CPU device.
1432 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1434 unsigned int cpu = dev->id;
1435 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1440 if (cpu_online(cpu))
1441 cpufreq_offline(cpu);
1443 cpumask_clear_cpu(cpu, policy->real_cpus);
1444 remove_cpu_dev_symlink(policy, dev);
1446 if (cpumask_empty(policy->real_cpus))
1447 cpufreq_policy_free(policy);
1451 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1452 * in deep trouble.
1453 * @policy: policy managing CPUs
1454 * @new_freq: CPU frequency the CPU actually runs at
1456 * We adjust to the current frequency first, and need to clean up later.
1457 * So either call cpufreq_update_policy() or schedule handle_update().
1459 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1460 unsigned int new_freq)
1462 struct cpufreq_freqs freqs;
1464 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1465 policy->cur, new_freq);
1467 freqs.old = policy->cur;
1468 freqs.new = new_freq;
1470 cpufreq_freq_transition_begin(policy, &freqs);
1471 cpufreq_freq_transition_end(policy, &freqs, 0);
1475 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1478 * This is the last known freq, without actually getting it from the driver.
1479 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1481 unsigned int cpufreq_quick_get(unsigned int cpu)
1483 struct cpufreq_policy *policy;
1484 unsigned int ret_freq = 0;
1485 unsigned long flags;
1487 read_lock_irqsave(&cpufreq_driver_lock, flags);
1489 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1490 ret_freq = cpufreq_driver->get(cpu);
1491 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1495 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1497 policy = cpufreq_cpu_get(cpu);
1499 ret_freq = policy->cur;
1500 cpufreq_cpu_put(policy);
1505 EXPORT_SYMBOL(cpufreq_quick_get);
1508 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1511 * Just return the max possible frequency for a given CPU.
1513 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1515 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1516 unsigned int ret_freq = 0;
1519 ret_freq = policy->max;
1520 cpufreq_cpu_put(policy);
1525 EXPORT_SYMBOL(cpufreq_quick_get_max);
1527 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1529 unsigned int ret_freq = 0;
1531 if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
1534 ret_freq = cpufreq_driver->get(policy->cpu);
1537 * If fast frequency switching is used with the given policy, the check
1538 * against policy->cur is pointless, so skip it in that case too.
1540 if (policy->fast_switch_enabled)
1543 if (ret_freq && policy->cur &&
1544 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1545 /* verify no discrepancy between actual and
1546 saved value exists */
1547 if (unlikely(ret_freq != policy->cur)) {
1548 cpufreq_out_of_sync(policy, ret_freq);
1549 schedule_work(&policy->update);
1557 * cpufreq_get - get the current CPU frequency (in kHz)
1560 * Get the current frequency of the CPU.
1562 unsigned int cpufreq_get(unsigned int cpu)
1564 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1565 unsigned int ret_freq = 0;
1568 down_read(&policy->rwsem);
1569 ret_freq = __cpufreq_get(policy);
1570 up_read(&policy->rwsem);
1572 cpufreq_cpu_put(policy);
1577 EXPORT_SYMBOL(cpufreq_get);
1579 static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
1581 unsigned int new_freq;
1583 new_freq = cpufreq_driver->get(policy->cpu);
1588 pr_debug("cpufreq: Driver did not initialize current freq\n");
1589 policy->cur = new_freq;
1590 } else if (policy->cur != new_freq && has_target()) {
1591 cpufreq_out_of_sync(policy, new_freq);
1597 static struct subsys_interface cpufreq_interface = {
1599 .subsys = &cpu_subsys,
1600 .add_dev = cpufreq_add_dev,
1601 .remove_dev = cpufreq_remove_dev,
1605 * In case platform wants some specific frequency to be configured
1606 * during suspend.
1608 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1612 if (!policy->suspend_freq) {
1613 pr_debug("%s: suspend_freq not defined\n", __func__);
1617 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1618 policy->suspend_freq);
1620 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1621 CPUFREQ_RELATION_H);
1623 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1624 __func__, policy->suspend_freq, ret);
1628 EXPORT_SYMBOL(cpufreq_generic_suspend);
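/*
 * Usage sketch (illustrative): a driver that must park its CPUs at a fixed
 * rate across suspend sets policy->suspend_freq from its ->init() path and
 * points ->suspend at the generic helper. The "foo" names and the
 * 800000 kHz value are placeholders:
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.suspend	= cpufreq_generic_suspend,
 *		...
 *	};
 *
 * and, in foo_cpufreq_init():
 *
 *	policy->suspend_freq = 800000;
 */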
1631 * cpufreq_suspend() - Suspend CPUFreq governors
1633 * Called during system-wide Suspend/Hibernate cycles for suspending governors,
1634 * as some platforms can't change frequency after this point in the suspend
1635 * cycle, because some of the devices (like i2c, regulators, etc.) they use
1636 * for changing frequency are suspended quickly after this point.
1638 void cpufreq_suspend(void)
1640 struct cpufreq_policy *policy;
1642 if (!cpufreq_driver)
1645 if (!has_target() && !cpufreq_driver->suspend)
1648 pr_debug("%s: Suspending Governors\n", __func__);
1650 for_each_active_policy(policy) {
1652 down_write(&policy->rwsem);
1653 cpufreq_stop_governor(policy);
1654 up_write(&policy->rwsem);
1657 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1658 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1663 cpufreq_suspended = true;
1667 * cpufreq_resume() - Resume CPUFreq governors
1669 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1670 * are suspended with cpufreq_suspend().
1672 void cpufreq_resume(void)
1674 struct cpufreq_policy *policy;
1677 if (!cpufreq_driver)
1680 if (unlikely(!cpufreq_suspended))
1683 cpufreq_suspended = false;
1685 if (!has_target() && !cpufreq_driver->resume)
1688 pr_debug("%s: Resuming Governors\n", __func__);
1690 for_each_active_policy(policy) {
1691 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1692 pr_err("%s: Failed to resume driver: %p\n", __func__,
1694 } else if (has_target()) {
1695 down_write(&policy->rwsem);
1696 ret = cpufreq_start_governor(policy);
1697 up_write(&policy->rwsem);
1700 pr_err("%s: Failed to start governor for policy: %p\n",
1707 * cpufreq_get_current_driver - return current driver's name
1709 * Return the name string of the currently loaded cpufreq driver
1710 * or NULL, if none.
1712 const char *cpufreq_get_current_driver(void)
1715 return cpufreq_driver->name;
1719 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1722 * cpufreq_get_driver_data - return current driver data
1724 * Return the private data of the currently loaded cpufreq
1725 * driver, or NULL if no cpufreq driver is loaded.
1727 void *cpufreq_get_driver_data(void)
1730 return cpufreq_driver->driver_data;
1734 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1736 /*********************************************************************
1737 * NOTIFIER LISTS INTERFACE *
1738 *********************************************************************/
1741 * cpufreq_register_notifier - register a driver with cpufreq
1742 * @nb: notifier function to register
1743 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1745 * Add a driver to one of two lists: either a list of drivers that
1746 * are notified about clock rate changes (once before and once after
1747 * the transition), or a list of drivers that are notified about
1748 * changes in cpufreq policy.
1750 * This function may sleep, and has the same return conditions as
1751 * blocking_notifier_chain_register.
1753 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1757 if (cpufreq_disabled())
1761 case CPUFREQ_TRANSITION_NOTIFIER:
1762 mutex_lock(&cpufreq_fast_switch_lock);
1764 if (cpufreq_fast_switch_count > 0) {
1765 mutex_unlock(&cpufreq_fast_switch_lock);
1768 ret = srcu_notifier_chain_register(
1769 &cpufreq_transition_notifier_list, nb);
1771 cpufreq_fast_switch_count--;
1773 mutex_unlock(&cpufreq_fast_switch_lock);
1775 case CPUFREQ_POLICY_NOTIFIER:
1776 ret = blocking_notifier_chain_register(
1777 &cpufreq_policy_notifier_list, nb);
1785 EXPORT_SYMBOL(cpufreq_register_notifier);
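/*
 * Usage sketch (illustrative): a client that must react to frequency changes
 * registers a transition notifier. The "foo" names are placeholders:
 *
 *	static int foo_cpufreq_notify(struct notifier_block *nb,
 *				      unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			foo_rescale(freqs->cpu, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_cpufreq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */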
1788 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1789 * @nb: notifier block to be unregistered
1790 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1792 * Remove a driver from the CPU frequency notifier list.
1794 * This function may sleep, and has the same return conditions as
1795 * blocking_notifier_chain_unregister.
1797 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1801 if (cpufreq_disabled())
1805 case CPUFREQ_TRANSITION_NOTIFIER:
1806 mutex_lock(&cpufreq_fast_switch_lock);
1808 ret = srcu_notifier_chain_unregister(
1809 &cpufreq_transition_notifier_list, nb);
1810 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
1811 cpufreq_fast_switch_count++;
1813 mutex_unlock(&cpufreq_fast_switch_lock);
1815 case CPUFREQ_POLICY_NOTIFIER:
1816 ret = blocking_notifier_chain_unregister(
1817 &cpufreq_policy_notifier_list, nb);
1825 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1828 /*********************************************************************
1829 *                              GOVERNORS                            *
1830 *********************************************************************/
1833 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
1834 * @policy: cpufreq policy to switch the frequency for.
1835 * @target_freq: New frequency to set (may be approximate).
1837 * Carry out a fast frequency switch without sleeping.
1839 * The driver's ->fast_switch() callback invoked by this function must be
1840 * suitable for being called from within RCU-sched read-side critical sections
1841 * and it is expected to select the minimum available frequency greater than or
1842 * equal to @target_freq (CPUFREQ_RELATION_L).
1844 * This function must not be called if policy->fast_switch_enabled is unset.
1846 * Governors calling this function must guarantee that it will never be invoked
1847 * twice in parallel for the same policy and that it will never be called in
1848 * parallel with either ->target() or ->target_index() for the same policy.
1850 * Returns the actual frequency set for the CPU.
1852 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
1853 * error condition, the hardware configuration must be preserved.
1855 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
1856 unsigned int target_freq)
1858 target_freq = clamp_val(target_freq, policy->min, policy->max);
1860 return cpufreq_driver->fast_switch(policy, target_freq);
1862 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
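/*
 * Usage sketch (illustrative): a governor running in scheduler context can
 * switch without sleeping once fast switching has been enabled for the
 * policy. "next_freq" stands for the governor's computed request:
 *
 *	if (policy->fast_switch_enabled) {
 *		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
 *		if (next_freq)
 *			policy->cur = next_freq;
 *	}
 */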
1864 /* Must set freqs->new to intermediate frequency */
1865 static int __target_intermediate(struct cpufreq_policy *policy,
1866 struct cpufreq_freqs *freqs, int index)
1870 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1872 /* We don't need to switch to intermediate freq */
1876 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1877 __func__, policy->cpu, freqs->old, freqs->new);
1879 cpufreq_freq_transition_begin(policy, freqs);
1880 ret = cpufreq_driver->target_intermediate(policy, index);
1881 cpufreq_freq_transition_end(policy, freqs, ret);
1884 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1890 static int __target_index(struct cpufreq_policy *policy, int index)
1892 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1893 unsigned int intermediate_freq = 0;
1894 unsigned int newfreq = policy->freq_table[index].frequency;
1895 int retval = -EINVAL;
1898 if (newfreq == policy->cur)
1901 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1903 /* Handle switching to intermediate frequency */
1904 if (cpufreq_driver->get_intermediate) {
1905 retval = __target_intermediate(policy, &freqs, index);
1909 intermediate_freq = freqs.new;
1910 /* Set old freq to intermediate */
1911 if (intermediate_freq)
1912 freqs.old = freqs.new;
1915 freqs.new = newfreq;
1916 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1917 __func__, policy->cpu, freqs.old, freqs.new);
1919 cpufreq_freq_transition_begin(policy, &freqs);
1922 retval = cpufreq_driver->target_index(policy, index);
1924 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1928 cpufreq_freq_transition_end(policy, &freqs, retval);
1931 * Failed after setting to intermediate freq? Driver should have
1932 * reverted back to initial frequency and so should we. Check
1933 * here for intermediate_freq instead of get_intermediate, in
1934 * case we haven't switched to intermediate freq at all.
1936 if (unlikely(retval && intermediate_freq)) {
1937 freqs.old = intermediate_freq;
1938 freqs.new = policy->restore_freq;
1939 cpufreq_freq_transition_begin(policy, &freqs);
1940 cpufreq_freq_transition_end(policy, &freqs, 0);
1947 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1948 unsigned int target_freq,
1949 unsigned int relation)
1951 unsigned int old_target_freq = target_freq;
1954 if (cpufreq_disabled())
1957 /* Make sure that target_freq is within supported range */
1958 target_freq = clamp_val(target_freq, policy->min, policy->max);
1960 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1961 policy->cpu, target_freq, relation, old_target_freq);
1964 * This might look like a redundant call as we are checking it again
1965 * after finding index. But it is left intentionally for cases where
1966 * exactly the same freq is requested again and so we can save a few
1967 * function calls.
1969 if (target_freq == policy->cur)
1972 /* Save last value to restore later on errors */
1973 policy->restore_freq = policy->cur;
1975 if (cpufreq_driver->target)
1976 return cpufreq_driver->target(policy, target_freq, relation);
1978 if (!cpufreq_driver->target_index)
1981 index = cpufreq_frequency_table_target(policy, target_freq, relation);
1983 return __target_index(policy, index);
1985 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1987 int cpufreq_driver_target(struct cpufreq_policy *policy,
1988 unsigned int target_freq,
1989 unsigned int relation)
1993 down_write(&policy->rwsem);
1995 ret = __cpufreq_driver_target(policy, target_freq, relation);
1997 up_write(&policy->rwsem);
2001 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2003 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2008 static int cpufreq_init_governor(struct cpufreq_policy *policy)
2012 /* Don't start any governor operations if we are entering suspend */
2013 if (cpufreq_suspended)
2016 * The governor might not have been initialized here if an ACPI _PPC
2017 * change notification happened, so check it.
2019 if (!policy->governor)
2022 /* Platform doesn't want dynamic frequency switching ? */
2023 if (policy->governor->dynamic_switching &&
2024 cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2025 struct cpufreq_governor *gov = cpufreq_fallback_governor();
2028 pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2029 policy->governor->name, gov->name);
2030 policy->governor = gov;
2036 if (!try_module_get(policy->governor->owner))
2039 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2041 if (policy->governor->init) {
2042 ret = policy->governor->init(policy);
2044 module_put(policy->governor->owner);
2052 static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2054 if (cpufreq_suspended || !policy->governor)
2057 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2059 if (policy->governor->exit)
2060 policy->governor->exit(policy);
2062 module_put(policy->governor->owner);
2065 static int cpufreq_start_governor(struct cpufreq_policy *policy)
2069 if (cpufreq_suspended)
2072 if (!policy->governor)
2075 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2077 if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
2078 cpufreq_update_current_freq(policy);
2080 if (policy->governor->start) {
2081 ret = policy->governor->start(policy);
2086 if (policy->governor->limits)
2087 policy->governor->limits(policy);
2092 static void cpufreq_stop_governor(struct cpufreq_policy *policy)
2094 if (cpufreq_suspended || !policy->governor)
2097 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2099 if (policy->governor->stop)
2100 policy->governor->stop(policy);
2103 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2105 if (cpufreq_suspended || !policy->governor)
2108 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2110 if (policy->governor->limits)
2111 policy->governor->limits(policy);
2114 int cpufreq_register_governor(struct cpufreq_governor *governor)
2121 if (cpufreq_disabled())
2124 mutex_lock(&cpufreq_governor_mutex);
2127 if (!find_governor(governor->name)) {
2129 list_add(&governor->governor_list, &cpufreq_governor_list);
2132 mutex_unlock(&cpufreq_governor_mutex);
2135 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
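/*
 * Usage sketch (illustrative): a governor module registers itself at module
 * init time and unregisters (via cpufreq_unregister_governor(), below) on
 * exit. The "foo" names are placeholders:
 *
 *	static struct cpufreq_governor cpufreq_gov_foo = {
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.start	= foo_start,
 *		.stop	= foo_stop,
 *		.limits	= foo_limits,
 *	};
 *
 *	static int __init foo_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&cpufreq_gov_foo);
 *	}
 *	module_init(foo_gov_init);
 */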
2137 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2139 struct cpufreq_policy *policy;
2140 unsigned long flags;
2145 if (cpufreq_disabled())
2148 /* clear last_governor for all inactive policies */
2149 read_lock_irqsave(&cpufreq_driver_lock, flags);
2150 for_each_inactive_policy(policy) {
2151 if (!strcmp(policy->last_governor, governor->name)) {
2152 policy->governor = NULL;
2153 strcpy(policy->last_governor, "\0");
2156 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2158 mutex_lock(&cpufreq_governor_mutex);
2159 list_del(&governor->governor_list);
2160 mutex_unlock(&cpufreq_governor_mutex);
2162 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2165 /*********************************************************************
2166 * POLICY INTERFACE *
2167 *********************************************************************/
2170 * cpufreq_get_policy - get the current cpufreq_policy
2171 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2172 * is written
2174 * Reads the current cpufreq policy.
2176 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2178 struct cpufreq_policy *cpu_policy;
2182 cpu_policy = cpufreq_cpu_get(cpu);
2186 memcpy(policy, cpu_policy, sizeof(*policy));
2188 cpufreq_cpu_put(cpu_policy);
2191 EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor) {
		pr_debug("cpufreq: governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("cpufreq: governor change\n");
			sched_cpufreq_governor_change(policy, old_gov);
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
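
/*
 * Illustrative sketch (not built): the calling pattern the min/max check
 * above relies on - new_policy starts as a byte copy of the current policy
 * and only one field (here ->max) is changed before cpufreq_set_policy()
 * re-validates the whole range. example_clamp_max() is hypothetical.
 */
#if 0
static int example_clamp_max(struct cpufreq_policy *policy, unsigned int max)
{
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.max = max;

	/* Callers hold policy->rwsem for writing around this. */
	return cpufreq_set_policy(policy, &new_policy);
}
#endif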
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;

	if (!policy)
		return;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy))
		goto unlock;

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		if (cpufreq_suspended)
			goto unlock;

		new_policy.cur = cpufreq_update_current_freq(policy);
		if (WARN_ON(!new_policy.cur))
			goto unlock;
	}

	cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
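
/*
 * Illustrative sketch (not built): how platform or firmware code that learns
 * of a limit change out-of-band (for example via an ACPI notification) might
 * ask the core to re-evaluate a CPU. example_limits_changed() is hypothetical.
 */
#if 0
static void example_limits_changed(unsigned int cpu)
{
	/*
	 * Re-reads the user limits, queries the driver for the current
	 * frequency and re-runs cpufreq_set_policy() for this CPU.
	 */
	cpufreq_update_policy(cpu);
}
#endif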
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		down_write(&policy->rwsem);
		policy->user_policy.max = policy->max;
		cpufreq_governor_limits(policy);
		up_write(&policy->rwsem);
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return likely(cpufreq_driver) && cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);
	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;
	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
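
/*
 * Illustrative sketch (not built): a driver whose frequency table marks some
 * entries with CPUFREQ_BOOST_FREQ can opt into the generic software boost
 * handling from its ->init() callback. example_driver_init() is hypothetical.
 */
#if 0
static int example_driver_init(struct cpufreq_policy *policy)
{
	/* ... set up policy->freq_table, clocks, etc. ... */

	/*
	 * Installs cpufreq_boost_set_sw() as ->set_boost (the driver did not
	 * provide its own) and creates the global "boost" sysfs file.
	 */
	return cpufreq_enable_boost_support();
}
#endif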
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
	      driver_data->target) ||
	    (driver_data->setpolicy && (driver_data->target_index ||
	      driver_data->target)) ||
	    (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
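
/*
 * Illustrative sketch (not built): the minimal shape of a ->target_index
 * style driver registration. All example_* names are hypothetical; a real
 * driver fills in its frequency table and clock programming.
 */
#if 0
static int example_init(struct cpufreq_policy *policy)
{
	/* A real driver points policy->freq_table at its table here. */
	return -ENODEV;		/* placeholder */
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* A real driver programs the clock for freq_table[index] here. */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.flags		= CPUFREQ_STICKY,
	.verify		= cpufreq_generic_frequency_table_verify,
	.init		= example_init,
	.target_index	= example_target_index,
};

static int __init example_cpufreq_register(void)
{
	/* Fails with -EEXIST if another cpufreq driver is already loaded. */
	return cpufreq_register_driver(&example_cpufreq_driver);
}
module_init(example_cpufreq_register);
#endif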
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
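
/*
 * Illustrative sketch (not built): the matching module exit path for the
 * registration sketch above; example_cpufreq_driver is hypothetical.
 */
#if 0
static void __exit example_cpufreq_unregister(void)
{
	/* Only the driver that registered itself may unregister. */
	cpufreq_unregister_driver(&example_cpufreq_driver);
}
module_exit(example_cpufreq_unregister);
#endif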
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpufreq_core_init);