/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)

#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}
/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;

	return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
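
/*
 * Example (illustrative sketch, not part of this file): a governor-style
 * load estimate from two successive samples. "prev_wall" and "prev_idle"
 * are hypothetical per-CPU state kept by the caller; both times are
 * returned in microseconds.
 *
 *	u64 wall, idle;
 *	unsigned int load = 0;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 1);
 *	if (wall > prev_wall)
 *		load = div64_u64(100 * (wall - prev_wall - idle + prev_idle),
 *				 wall - prev_wall);
 *	prev_wall = wall;
 *	prev_idle = idle;
 */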
__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
		unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - set policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
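
/*
 * Example (illustrative sketch, not part of this file): a minimal driver
 * ->init() callback built on cpufreq_generic_init(). "my_freq_table" and
 * the 100 us (100000 ns) transition latency are hypothetical.
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, my_freq_table, 100000);
 *	}
 */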
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy.  Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
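
/*
 * Example (illustrative sketch, not part of this file): the get/put pattern
 * for safely inspecting a policy from unrelated kernel code.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u: cur %u kHz, range %u-%u kHz\n", cpu,
 *			policy->cur, policy->min, policy->max);
 *		cpufreq_cpu_put(policy);
 *	}
 */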
/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}
/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
/**
 * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
 * @policy: cpufreq policy the frequency update belongs to.
 * @freqs: contain details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}
/* Do post notifications when there is a chance that the transition failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
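
/*
 * Example (illustrative sketch, not part of this file): how a driver that
 * does its own notification (no CPUFREQ_ASYNC_NOTIFICATION) typically
 * brackets the hardware frequency write. "my_write_freq_register" is a
 * hypothetical helper.
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = my_write_freq_register(policy, target);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */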
/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate.
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), the above formula gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
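
/*
 * Worked example (illustrative): with cpuinfo.transition_latency = 500000 ns,
 * latency = 500 us, so with the usual LATENCY_MULTIPLIER of 1000 the default
 * delay is min(500 * 1000, 10000) = 10000 us = 10 ms (the cap).  A latency of
 * 5000 ns gives 5 us, hence min(5 * 1000, 10000) = 5000 us.
 */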
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
static int cpufreq_parse_policy(char *str_governor,
				struct cpufreq_policy *policy)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
		return 0;
	}

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
		policy->policy = CPUFREQ_POLICY_POWERSAVE;
		return 0;
	}

	return -EINVAL;
}
/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 */
static int cpufreq_parse_governor(char *str_governor,
				  struct cpufreq_policy *policy)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);

	t = find_governor(str_governor);
	if (!t) {
		int ret;

		mutex_unlock(&cpufreq_governor_mutex);

		ret = request_module("cpufreq_%s", str_governor);
		if (ret)
			return -EINVAL;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);
	}
	if (t && !try_module_get(t->owner))
		t = NULL;

	mutex_unlock(&cpufreq_governor_mutex);

	if (!t)
		return -EINVAL;

	policy->governor = t;

	return 0;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver && cpufreq_driver->setpolicy &&
		 cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	memcpy(&new_policy, policy, sizeof(*policy));			\
	new_policy.min = policy->user_policy.min;			\
	new_policy.max = policy->user_policy.max;			\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		if (cpufreq_parse_policy(str_governor, &new_policy))
			return -EINVAL;
	} else {
		if (cpufreq_parse_governor(str_governor, &new_policy))
			return -EINVAL;
	}

	ret = cpufreq_set_policy(policy, &new_policy);

	if (new_policy.governor)
		module_put(new_policy.governor->owner);

	return ret ? ret : count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	/*
	 * cpus_read_trylock() is used here to work around a circular lock
	 * dependency problem with respect to cpufreq_register_driver().
	 */
	if (!cpus_read_trylock())
		return -EBUSY;

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	cpus_read_unlock();

	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (!dev)
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}
__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
	return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL, *def_gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	def_gov = cpufreq_default_governor();

	if (has_target()) {
		/*
		 * Update governor of new_policy to the governor used before
		 * hotplug
		 */
		gov = find_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
		} else {
			if (!def_gov)
				return -ENODATA;
			gov = def_gov;
		}
		new_policy.governor = gov;
	} else {
		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			new_policy.policy = policy->last_policy;
		} else {
			if (!def_gov)
				return -ENODATA;
			cpufreq_parse_policy(def_gov->name, &new_policy);
		}
	}

	return cpufreq_set_policy(policy, &new_policy);
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;

	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	if (!new_policy && cpufreq_driver->online) {
		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}

		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->setpolicy for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_exit_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}
	} else {
		policy->min = policy->user_policy.min;
		policy->max = policy->user_policy.max;
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration of time, so it is better to set it to a frequency
	 * which is specified in the freq table. This also makes cpufreq
	 * stats inconsistent, as cpufreq-stats would fail to register
	 * because the current frequency of the CPU isn't found in the freq
	 * table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}
static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus)) {
		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline)
			cpufreq_driver->exit(policy);

		cpufreq_policy_free(policy);
	}
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (unlikely(policy_is_inactive(policy)))
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case too.
	 */
	if (policy->fast_switch_enabled)
		return ret_freq;

	if (has_target() && ret_freq && policy->cur) {
		/* Verify that no discrepancy between the actual and the
		 * saved value exists. */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current frequency of the CPU from the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	if (!policy->cur) {
		pr_debug("cpufreq: Driver did not initialize current freq\n");
		policy->cur = new_freq;
	} else if (policy->cur != new_freq && has_target()) {
		cpufreq_out_of_sync(policy, new_freq);
	}

	return new_freq;
}
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
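
/*
 * Example (illustrative sketch, not part of this file): a driver opting in
 * to the generic suspend handler. Setting policy->suspend_freq (here from
 * its ->init() callback, with a hypothetical 800 MHz value) is what makes
 * it do anything.
 *
 *	static struct cpufreq_driver my_driver = {
 *		...
 *		.suspend = cpufreq_generic_suspend,
 *	};
 *
 *	// in ->init():
 *	policy->suspend_freq = 800000;	// kHz, hypothetical
 */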
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change the frequency after this point in the suspend
 * cycle, because some of the devices used for changing the frequency (e.g.
 * i2c, regulators) are themselves suspended soon after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/
/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
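
/*
 * Example (illustrative sketch, not part of this file): registering a
 * transition notifier. "my_transition_cb" is hypothetical; it runs once
 * with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE per transition.
 * Note the registration fails with -EBUSY if fast switching is in use.
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("now at %u kHz\n", freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */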
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/
/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
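
/*
 * Example (illustrative sketch, not part of this file): a schedutil-style
 * caller. The governor must already have checked
 * policy->fast_switch_enabled and must serialize its own invocations, per
 * the rules above; the policy->cur bookkeeping mirrors what schedutil does.
 *
 *	unsigned int freq = cpufreq_driver_resolve_freq(policy, target_freq);
 *
 *	freq = cpufreq_driver_fast_switch(policy, freq);
 *	if (freq)
 *		policy->cur = freq;
 */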
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same frequency is requested again, so that we can
	 * save a few function calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);
	ret = __cpufreq_driver_target(policy, target_freq, relation);
	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
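
/*
 * Example (illustrative sketch, not part of this file): a governor asking
 * for the lowest supported frequency at or above a computed target.
 *
 *	ret = cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_L);
 *	if (ret)
 *		pr_debug("frequency switch failed: %d\n", ret);
 */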
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}
static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * The governor might not have been initialized here if an ACPI _PPC
	 * change notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->dynamic_switching &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}
static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_update_current_freq(policy);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}
static void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
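
/*
 * Example (illustrative sketch, not part of this file): a minimal module
 * registering a governor. "my_governor" and its name are hypothetical;
 * a real governor also fills in ->init/->exit/->start/->stop/->limits.
 *
 *	static struct cpufreq_governor my_governor = {
 *		.name	= "mygov",
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	static int __init my_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&my_governor);
 *	}
 *	module_init(my_gov_init);
 */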
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/
/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
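
/*
 * Example (illustrative sketch, not part of this file): taking a snapshot
 * of a CPU's policy without holding any reference afterwards.
 *
 *	struct cpufreq_policy snapshot;
 *
 *	if (!cpufreq_get_policy(&snapshot, cpu))
 *		pr_info("cpu%u limits: %u-%u kHz\n", cpu,
 *			snapshot.min, snapshot.max);
 */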
/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_policy: New policy data.
 *
 * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
 * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
 * the driver's ->verify() callback again and run the notifiers for it again
 * with the CPUFREQ_NOTIFY value.  Next, copy the min and max parameters
 * of @new_policy to @policy and either invoke the driver's ->setpolicy()
 * callback (if present) or carry out a governor update for @policy.  That is,
 * run the current governor's ->limits() callback (if the governor field in
 * @new_policy points to the same object as the one in @policy) or replace the
 * governor for @policy with the new one stored in @new_policy.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
int cpufreq_set_policy(struct cpufreq_policy *policy,
		       struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify that the CPU speed can be set within these limits */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_ADJUST, new_policy);

	/*
	 * verify that the CPU speed can be set within these limits, which
	 * might differ from the ones checked first
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_policy->governor == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			sched_cpufreq_governor_change(policy, old_gov);
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
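
/*
 * Sketch of the CPUFREQ_ADJUST step above (illustrative; the notifier
 * and the 800000 kHz cap are hypothetical): a policy notifier may clamp
 * the limits between the two ->verify() passes, e.g. for thermal
 * reasons:
 *
 *	static int example_policy_notifier(struct notifier_block *nb,
 *					   unsigned long event, void *data)
 *	{
 *		struct cpufreq_policy *policy = data;
 *
 *		if (event == CPUFREQ_ADJUST)
 *			cpufreq_verify_within_limits(policy, 0, 800000);
 *
 *		return NOTIFY_OK;
 *	}
 *
 * Such a notifier block is registered via
 * cpufreq_register_notifier(&nb, CPUFREQ_POLICY_NOTIFIER).
 */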
/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits saved in the
 * user_policy sub-structure of that policy, which triggers the evaluation
 * of policy notifiers and the cpufreq driver's ->verify() callback for the
 * policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
	struct cpufreq_policy new_policy;

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy))))
		goto unlock;

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	cpufreq_set_policy(policy, &new_policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
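
/*
 * Usage sketch (illustrative; the work item and example_cpu are
 * hypothetical): hardware that adjusts its limits autonomously, e.g. on
 * a thermal event, would call this from sleepable context such as a
 * work handler rather than from the interrupt itself:
 *
 *	static void example_limits_work(struct work_struct *work)
 *	{
 *		cpufreq_update_limits(example_cpu);
 *	}
 */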
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		down_write(&policy->rwsem);
		policy->user_policy.max = policy->max;
		cpufreq_governor_limits(policy);
		up_write(&policy->rwsem);
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
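
/*
 * Usage sketch (illustrative): a driver whose frequency table carries
 * boost entries but which lacks a hardware boost switch of its own opts
 * into the software fallback from its ->init() callback, e.g.:
 *
 *	if (policy_has_boost_freq(policy)) {
 *		ret = cpufreq_enable_boost_support();
 *		if (ret)
 *			return ret;
 *	}
 */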
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 *	submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
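
/*
 * Registration sketch (illustrative; the "example" driver and its
 * callbacks are hypothetical): a ->target_index style driver fills in
 * the mandatory callbacks validated above and registers from module
 * init:
 *
 *	static struct cpufreq_driver example_driver = {
 *		.name		= "example",
 *		.init		= example_cpu_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return cpufreq_register_driver(&example_driver);
 *	}
 *	module_init(example_init);
 */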
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you successfully registered the driver
 * beforehand. Returns zero if successful, and -EINVAL if the
 * cpufreq_driver is currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent CPU hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpufreq_core_init);