/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/include/linux/cpufreq.h
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 */
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
#include <linux/minmax.h>

/*********************************************************************
 *                        CPUFREQ INTERFACE                          *
 *********************************************************************/
/*
 * Frequency values here are CPU kHz
 *
 * Maximum transition latency is in nanoseconds - if it's unknown,
 * CPUFREQ_ETERNAL shall be used.
 */

#define CPUFREQ_ETERNAL		(-1)
#define CPUFREQ_NAME_LEN	16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN	(CPUFREQ_NAME_LEN + 1)

struct cpufreq_governor;

enum cpufreq_table_sorting {
        CPUFREQ_TABLE_UNSORTED,
        CPUFREQ_TABLE_SORTED_ASCENDING,
        CPUFREQ_TABLE_SORTED_DESCENDING
};

struct cpufreq_cpuinfo {
        unsigned int max_freq;
        unsigned int min_freq;

        /* in 10^(-9) s = nanoseconds */
        unsigned int transition_latency;
};

struct cpufreq_policy {
        /* CPUs sharing clock, require sw coordination */
        cpumask_var_t cpus;             /* Online CPUs only */
        cpumask_var_t related_cpus;     /* Online + Offline CPUs */
        cpumask_var_t real_cpus;        /* Related and present */

        unsigned int shared_type;       /* ACPI: ANY or ALL affected CPUs
                                           should set cpufreq */
        unsigned int cpu;               /* cpu managing this policy, must be online */

        struct clk *clk;
        struct cpufreq_cpuinfo cpuinfo; /* see above */

        unsigned int min;               /* in kHz */
        unsigned int max;               /* in kHz */
        unsigned int cur;               /* in kHz, only needed if cpufreq
                                         * governors are used */
        unsigned int suspend_freq;      /* freq to set during suspend */

        unsigned int policy;            /* see above */
        unsigned int last_policy;       /* policy before unplug */
        struct cpufreq_governor *governor; /* see below */
        void *governor_data;
        char last_governor[CPUFREQ_NAME_LEN]; /* last governor used */

        struct work_struct update;      /* if update_policy() needs to be
                                         * called, but you're in IRQ context */

        struct freq_constraints constraints;
        struct freq_qos_request *min_freq_req;
        struct freq_qos_request *max_freq_req;

        struct cpufreq_frequency_table *freq_table;
        enum cpufreq_table_sorting freq_table_sorted;

        struct list_head policy_list;
        struct kobject kobj;
        struct completion kobj_unregister;

        /*
         * The rules for this semaphore:
         * - Any routine that wants to read from the policy structure will
         *   do a down_read on this semaphore.
         * - Any routine that will write to the policy structure and/or may take away
         *   the policy altogether (eg. CPU hotplug), will hold this lock in write
         *   mode before doing so.
         */
        struct rw_semaphore rwsem;

        /*
         * Fast switch flags:
         * - fast_switch_possible should be set by the driver if it can
         *   guarantee that frequency can be changed on any CPU sharing the
         *   policy and that the change will affect all of the policy CPUs then.
         * - fast_switch_enabled is to be set by governors that support fast
         *   frequency switching with the help of cpufreq_enable_fast_switch().
         */
        bool fast_switch_possible;
        bool fast_switch_enabled;

        /*
         * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
         * governor.
         */
        bool strict_target;

        /*
         * Set if inefficient frequencies were found in the frequency table.
         * This indicates if the relation flag CPUFREQ_RELATION_E can be
         * honored.
         */
        bool efficiencies_available;

        /*
         * Preferred average time interval between consecutive invocations of
         * the driver to set the frequency for this policy. To be set by the
         * scaling driver (0, which is the default, means no preference).
         */
        unsigned int transition_delay_us;

        /*
         * Remote DVFS flag (Not added to the driver structure as we don't want
         * to access another structure from scheduler hotpath).
         *
         * Should be set if CPUs can do DVFS on behalf of other CPUs from
         * different cpufreq policies.
         */
        bool dvfs_possible_from_any_cpu;

        /* Per policy boost enabled flag. */
        bool boost_enabled;

        /* Per policy boost supported flag. */
        bool boost_supported;

        /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
        unsigned int cached_target_freq;
        unsigned int cached_resolved_idx;

        /* Synchronization for frequency transitions */
        bool transition_ongoing;        /* Tracks transition status */
        spinlock_t transition_lock;
        wait_queue_head_t transition_wait;
        struct task_struct *transition_task; /* Task which is doing the transition */

        /* cpufreq-stats */
        struct cpufreq_stats *stats;

        /* For cpufreq driver's internal use */
        void *driver_data;

        /* Pointer to the cooling device if used for thermal mitigation */
        struct thermal_cooling_device *cdev;

        struct notifier_block nb_min;
        struct notifier_block nb_max;
};

DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
             down_write(&_T->rwsem), up_write(&_T->rwsem))

DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
             down_read(&_T->rwsem), up_read(&_T->rwsem))

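/*
 * Illustrative sketch (not part of the original header): the guards defined
 * above allow scope-based locking of policy->rwsem via <linux/cleanup.h>:
 *
 *      static void example_update_limits(struct cpufreq_policy *policy)
 *      {
 *              guard(cpufreq_policy_write)(policy);
 *              // The write lock is held here and released automatically
 *              // when the function returns.
 *      }
 *
 * The function name is hypothetical; only the guard(cpufreq_policy_write)()
 * usage pattern is the point.
 */
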
/*
 * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
 * callback for sanitization. That callback is only expected to modify the min
 * and max values, if necessary, and specifically it must not update the
 * frequency table.
 */
struct cpufreq_policy_data {
        struct cpufreq_cpuinfo cpuinfo;
        struct cpufreq_frequency_table *freq_table;
        unsigned int cpu;
        unsigned int min;       /* in kHz */
        unsigned int max;       /* in kHz */
};

struct cpufreq_freqs {
        struct cpufreq_policy *policy;
        unsigned int old;
        unsigned int new;
        u8 flags;               /* flags of cpufreq_driver, see below. */
};

/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW	 (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL	 (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY	 (3) /* Freq can be set from any dependent CPU */

#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
#else
static inline struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        return NULL;
}
static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        return NULL;
}
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif

/* Scope based cleanup macro for cpufreq_policy kobject reference counting */
DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *, if (_T) cpufreq_cpu_put(_T))

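/*
 * Illustrative sketch (not part of the original header): with the cleanup
 * class above, a reference taken with cpufreq_cpu_get() is dropped
 * automatically when the pointer goes out of scope:
 *
 *      static unsigned int example_read_cur_freq(unsigned int cpu)
 *      {
 *              struct cpufreq_policy *policy __free(put_cpufreq_policy) =
 *                              cpufreq_cpu_get(cpu);
 *
 *              return policy ? policy->cur : 0;
 *      }
 *
 * The helper name is hypothetical; it only demonstrates pairing __free()
 * with DEFINE_FREE(put_cpufreq_policy, ...).
 */
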
static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
        return cpumask_empty(policy->cpus);
}

static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
        return cpumask_weight(policy->cpus) > 1;
}

#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
void disable_cpufreq(void);

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);

void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
bool cpufreq_supports_freq_invariance(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
bool has_target_index(void);

DECLARE_PER_CPU(unsigned long, cpufreq_pressure);
static inline unsigned long cpufreq_get_pressure(int cpu)
{
        return READ_ONCE(per_cpu(cpufreq_pressure, cpu));
}
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
        return 0;
}
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
        return 0;
}
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        return 0;
}
static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
        return 0;
}
static inline bool cpufreq_supports_freq_invariance(void)
{
        return false;
}
static inline void disable_cpufreq(void) { }
static inline void cpufreq_update_limits(unsigned int cpu) { }
static inline unsigned long cpufreq_get_pressure(int cpu)
{
        return 0;
}
#endif

#ifdef CONFIG_CPU_FREQ_STAT
void cpufreq_stats_create_table(struct cpufreq_policy *policy);
void cpufreq_stats_free_table(struct cpufreq_policy *policy);
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
                                     unsigned int new_freq);
#else
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
                                                   unsigned int new_freq) { }
#endif /* CONFIG_CPU_FREQ_STAT */

/*********************************************************************
 *                     CPUFREQ DRIVER INTERFACE                      *
 *********************************************************************/

#define CPUFREQ_RELATION_L 0  /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1  /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2  /* closest frequency to target */
/* relation flags */
#define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */

#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)

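/*
 * Worked example (added for illustration, not part of the original header):
 * given a sorted table {500000, 1000000, 1500000} kHz and a target of
 * 1200000 kHz, CPUFREQ_RELATION_L picks 1500000 (lowest at or above),
 * CPUFREQ_RELATION_H picks 1000000 (highest at or below) and
 * CPUFREQ_RELATION_C picks 1000000 (closest). The *_E variants apply the
 * same rule but skip entries marked CPUFREQ_INEFFICIENT_FREQ when the
 * policy has efficiencies_available set.
 */
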
struct freq_attr {
        struct attribute attr;
        ssize_t (*show)(struct cpufreq_policy *, char *);
        ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};

#define cpufreq_freq_attr_ro(_name)             \
static struct freq_attr _name =                 \
__ATTR(_name, 0444, show_##_name, NULL)

#define cpufreq_freq_attr_ro_perm(_name, _perm) \
static struct freq_attr _name =                 \
__ATTR(_name, _perm, show_##_name, NULL)

#define cpufreq_freq_attr_rw(_name)             \
static struct freq_attr _name =                 \
__ATTR(_name, 0644, show_##_name, store_##_name)

#define cpufreq_freq_attr_wo(_name)             \
static struct freq_attr _name =                 \
__ATTR(_name, 0200, NULL, store_##_name)

#define define_one_global_ro(_name)             \
static struct kobj_attribute _name =            \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_global_rw(_name)             \
static struct kobj_attribute _name =            \
__ATTR(_name, 0644, show_##_name, store_##_name)

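/*
 * Illustrative sketch (not part of the original header): a driver-specific
 * read-only sysfs attribute would typically be built from these macros as:
 *
 *      static ssize_t show_example_freq(struct cpufreq_policy *policy, char *buf)
 *      {
 *              return sysfs_emit(buf, "%u\n", policy->cur);
 *      }
 *      cpufreq_freq_attr_ro(example_freq);
 *
 * and then exposed through the driver's ->attr list. The attribute name is
 * hypothetical; the show_##_name pairing is what the macro expects.
 */
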
struct cpufreq_driver {
        char name[CPUFREQ_NAME_LEN];
        u16 flags;
        void *driver_data;

        /* needed by all drivers */
        int (*init)(struct cpufreq_policy *policy);
        int (*verify)(struct cpufreq_policy_data *policy);

        /* define one out of two */
        int (*setpolicy)(struct cpufreq_policy *policy);

        int (*target)(struct cpufreq_policy *policy,
                      unsigned int target_freq,
                      unsigned int relation);           /* Deprecated */
        int (*target_index)(struct cpufreq_policy *policy,
                            unsigned int index);
        unsigned int (*fast_switch)(struct cpufreq_policy *policy,
                                    unsigned int target_freq);
        /*
         * ->fast_switch() replacement for drivers that use an internal
         * representation of performance levels and can pass hints other than
         * the target performance level to the hardware. This can only be set
         * if ->fast_switch is set too, because in those cases (under specific
         * conditions) scale invariance can be disabled, which causes the
         * schedutil governor to fall back to the latter.
         */
        void (*adjust_perf)(unsigned int cpu,
                            unsigned long min_perf,
                            unsigned long target_perf,
                            unsigned long capacity);

        /*
         * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
         * unset.
         *
         * get_intermediate should return a stable intermediate frequency
         * platform wants to switch to and target_intermediate() should set CPU
         * to that frequency, before jumping to the frequency corresponding
         * to 'index'. Core will take care of sending notifications and driver
         * doesn't have to handle them in target_intermediate() or
         * target_index().
         *
         * Drivers can return '0' from get_intermediate() in case they don't
         * wish to switch to intermediate frequency for some target frequency.
         * In that case core will directly call ->target_index().
         */
        unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
                                         unsigned int index);
        int (*target_intermediate)(struct cpufreq_policy *policy,
                                   unsigned int index);

        /* should be defined, if possible, return 0 on error */
        unsigned int (*get)(unsigned int cpu);

        /* Called to update policy limits on firmware notifications. */
        void (*update_limits)(struct cpufreq_policy *policy);

        /* optional */
        int (*bios_limit)(int cpu, unsigned int *limit);

        int (*online)(struct cpufreq_policy *policy);
        int (*offline)(struct cpufreq_policy *policy);
        void (*exit)(struct cpufreq_policy *policy);
        int (*suspend)(struct cpufreq_policy *policy);
        int (*resume)(struct cpufreq_policy *policy);

        /* Will be called after the driver is fully initialized */
        void (*ready)(struct cpufreq_policy *policy);

        struct freq_attr **attr;

        /* platform specific boost support code */
        bool boost_enabled;
        int (*set_boost)(struct cpufreq_policy *policy, int state);

        /*
         * Set by drivers that want to register with the energy model after the
         * policy is properly initialized, but before the governor is started.
         */
        void (*register_em)(struct cpufreq_policy *policy);
};

/* flags */

/*
 * Set by drivers that need to update internal upper and lower boundaries along
 * with the target frequency and so the core and governors should also invoke
 * the driver if the target frequency does not change, but the policy min or
 * max may have changed.
 */
#define CPUFREQ_NEED_UPDATE_LIMITS              BIT(0)

/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS                     BIT(1)

/*
 * Set by drivers that want the core to automatically register the cpufreq
 * driver as a thermal cooling device.
 */
#define CPUFREQ_IS_COOLING_DEV                  BIT(2)

/*
 * This should be set by platforms having multiple clock-domains, i.e.
 * supporting multiple policies. With this, per-policy sysfs directories for
 * the governor are created under cpu/cpu<num>/cpufreq/, so the same governor
 * can be used with different tunables for different clusters.
 */
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY        BIT(3)

/*
 * Drivers that do POSTCHANGE notifications from outside of their ->target()
 * routine must set this flag in cpufreq_driver->flags, so that the core can
 * handle those notifications specially.
 */
#define CPUFREQ_ASYNC_NOTIFICATION              BIT(4)

/*
 * Set by drivers which want the cpufreq core to check whether the CPU is
 * running at a frequency present in the freq-table exposed by the driver. For
 * these drivers, if the CPU is found running at an out-of-table frequency, the
 * core will try to set it to a frequency from the table, and if that fails it
 * will stop further boot by issuing a BUG_ON().
 */
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK         BIT(5)

/*
 * Set by drivers to disallow use of governors with "dynamic_switching" flag
 * set.
 */
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING       BIT(6)

int cpufreq_register_driver(struct cpufreq_driver *driver_data);
void cpufreq_unregister_driver(struct cpufreq_driver *driver_data);

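/*
 * Illustrative sketch (not part of the original header): a minimal
 * frequency-table based driver usually fills in init/verify/target_index and
 * registers itself roughly like this (all "example_*" names are hypothetical,
 * the generic helpers are declared later in this header):
 *
 *      static int example_cpufreq_init(struct cpufreq_policy *policy)
 *      {
 *              policy->freq_table = example_freq_table;
 *              policy->cpuinfo.transition_latency = 300 * 1000; // 300 us
 *              return 0;
 *      }
 *
 *      static struct cpufreq_driver example_cpufreq_driver = {
 *              .name           = "example",
 *              .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *              .init           = example_cpufreq_init,
 *              .verify         = cpufreq_generic_frequency_table_verify,
 *              .target_index   = example_set_target_index,
 *              .get            = cpufreq_generic_get,
 *      };
 *
 *      // then: cpufreq_register_driver(&example_cpufreq_driver);
 */
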
bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
void *cpufreq_get_driver_data(void);

static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
{
        return IS_ENABLED(CONFIG_CPU_THERMAL) &&
                (drv->flags & CPUFREQ_IS_COOLING_DEV);
}

static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
                                                unsigned int min,
                                                unsigned int max)
{
        policy->max = clamp(policy->max, min, max);
        policy->min = clamp(policy->min, min, policy->max);
}

static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
{
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);
}

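/*
 * Illustrative sketch (not part of the original header): most drivers'
 * ->verify() callbacks simply clamp the requested limits to the hardware
 * range, e.g.:
 *
 *      static int example_cpufreq_verify(struct cpufreq_policy_data *policy)
 *      {
 *              cpufreq_verify_within_cpu_limits(policy);
 *              return 0;
 *      }
 *
 * The callback name is hypothetical; drivers with a frequency table can use
 * cpufreq_generic_frequency_table_verify() instead.
 */
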
#ifdef CONFIG_CPU_FREQ
void cpufreq_suspend(void);
void cpufreq_resume(void);
int cpufreq_generic_suspend(struct cpufreq_policy *policy);
#else
static inline void cpufreq_suspend(void) {}
static inline void cpufreq_resume(void) {}
#endif

/*********************************************************************
 *                     CPUFREQ NOTIFIER INTERFACE                    *
 *********************************************************************/

#define CPUFREQ_TRANSITION_NOTIFIER     (0)
#define CPUFREQ_POLICY_NOTIFIER         (1)

/* Transition notifiers */
#define CPUFREQ_PRECHANGE               (0)
#define CPUFREQ_POSTCHANGE              (1)

/* Policy Notifiers */
#define CPUFREQ_CREATE_POLICY           (0)
#define CPUFREQ_REMOVE_POLICY           (1)

#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed);

#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
                                            unsigned int list)
{
        return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
                                              unsigned int list)
{
        return 0;
}
#endif /* !CONFIG_CPU_FREQ */

/**
 * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
 * safe)
 * @old: old value
 * @div: divisor
 * @mult: multiplier
 *
 * new = old * mult / div
 */
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
                                          u_int mult)
{
#if BITS_PER_LONG == 32
        u64 result = ((u64) old) * ((u64) mult);
        do_div(result, div);
        return (unsigned long) result;

#elif BITS_PER_LONG == 64
        unsigned long result = old * ((u64) mult);
        result /= div;
        return result;
#endif
}

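/*
 * Worked example (added for illustration, not part of the original header):
 * rescaling a frequency-derived value such as loops_per_jiffy after a
 * transition from 800000 kHz to 1600000 kHz:
 *
 *      new_lpj = cpufreq_scale(old_lpj, 800000, 1600000);
 *
 * i.e. new = old * 1600000 / 800000 = old * 2, with the 32-bit variant doing
 * the multiplication in 64 bits to avoid overflow.
 */
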
/*********************************************************************
 *                        CPUFREQ GOVERNORS                          *
 *********************************************************************/

#define CPUFREQ_POLICY_UNKNOWN          (0)
/*
 * If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
 * two generic policies are available:
 */
#define CPUFREQ_POLICY_POWERSAVE        (1)
#define CPUFREQ_POLICY_PERFORMANCE      (2)

struct cpufreq_governor {
        char name[CPUFREQ_NAME_LEN];
        int (*init)(struct cpufreq_policy *policy);
        void (*exit)(struct cpufreq_policy *policy);
        int (*start)(struct cpufreq_policy *policy);
        void (*stop)(struct cpufreq_policy *policy);
        void (*limits)(struct cpufreq_policy *policy);
        ssize_t (*show_setspeed)(struct cpufreq_policy *policy,
                                 char *buf);
        int (*store_setspeed)(struct cpufreq_policy *policy,
                              unsigned int freq);
        struct list_head governor_list;
        struct module *owner;
        u8 flags;
};

/* Governor flags */

/* For governors which change frequency dynamically by themselves */
#define CPUFREQ_GOV_DYNAMIC_SWITCHING   BIT(0)

/* For governors wanting the target frequency to be set exactly */
#define CPUFREQ_GOV_STRICT_TARGET       BIT(1)

/* Pass a target to the cpufreq driver */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
                                        unsigned int target_freq);
void cpufreq_driver_adjust_perf(unsigned int cpu,
                                unsigned long min_perf,
                                unsigned long target_perf,
                                unsigned long capacity);
bool cpufreq_driver_has_adjust_perf(void);
int cpufreq_driver_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation);
int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
                                         unsigned int target_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
int cpufreq_start_governor(struct cpufreq_policy *policy);
void cpufreq_stop_governor(struct cpufreq_policy *policy);

#define cpufreq_governor_init(__governor)                       \
static int __init __governor##_init(void)                      \
{                                                               \
        return cpufreq_register_governor(&__governor);         \
}                                                               \
core_initcall(__governor##_init)

#define cpufreq_governor_exit(__governor)                       \
static void __exit __governor##_exit(void)                      \
{                                                               \
        return cpufreq_unregister_governor(&__governor);       \
}                                                               \
module_exit(__governor##_exit)

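/*
 * Illustrative sketch (not part of the original header): a minimal governor
 * module fills in struct cpufreq_governor and uses the helper macros above
 * for registration (all "example_*" names are hypothetical):
 *
 *      static int example_gov_start(struct cpufreq_policy *policy)
 *      {
 *              return __cpufreq_driver_target(policy, policy->max,
 *                                             CPUFREQ_RELATION_HE);
 *      }
 *
 *      static struct cpufreq_governor example_governor = {
 *              .name   = "example",
 *              .start  = example_gov_start,
 *              .owner  = THIS_MODULE,
 *      };
 *
 *      cpufreq_governor_init(example_governor);
 *      cpufreq_governor_exit(example_governor);
 */
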
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);

#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
bool sugov_is_governor(struct cpufreq_policy *policy);
#else
static inline bool sugov_is_governor(struct cpufreq_policy *policy)
{
        return false;
}
#endif

static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
        if (policy->max < policy->cur)
                __cpufreq_driver_target(policy, policy->max,
                                        CPUFREQ_RELATION_HE);
        else if (policy->min > policy->cur)
                __cpufreq_driver_target(policy, policy->min,
                                        CPUFREQ_RELATION_LE);
}

/* Governor attribute set */
struct gov_attr_set {
        struct kobject kobj;
        struct list_head policy_list;
        struct mutex update_lock;
        int usage_count;
};

/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;

static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
{
        return container_of(kobj, struct gov_attr_set, kobj);
}

void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);

/* Governor sysfs attribute */
struct governor_attr {
        struct attribute attr;
        ssize_t (*show)(struct gov_attr_set *attr_set, char *buf);
        ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
                         size_t count);
};

/*********************************************************************
 *                     FREQUENCY TABLE HELPERS                       *
 *********************************************************************/

/* Special Values of .frequency field */
#define CPUFREQ_ENTRY_INVALID           ~0u
#define CPUFREQ_TABLE_END               ~1u
/* Special Values of .flags field */
#define CPUFREQ_BOOST_FREQ              (1 << 0)
#define CPUFREQ_INEFFICIENT_FREQ        (1 << 1)

struct cpufreq_frequency_table {
        unsigned int flags;
        unsigned int driver_data; /* driver specific data, not used by core */
        unsigned int frequency; /* kHz - doesn't need to be in ascending
                                 * order */
};

/*
 * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_entry(pos, table)      \
        for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)

/*
 * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
 *      with index
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 * @idx: the table entry currently being processed
 */

#define cpufreq_for_each_entry_idx(pos, table, idx)     \
        for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
                pos++, idx++)

/*
 * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
 *      excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 */

#define cpufreq_for_each_valid_entry(pos, table)                        \
        for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)   \
                if (pos->frequency == CPUFREQ_ENTRY_INVALID)            \
                        continue;                                       \
                else

/*
 * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq
 *      frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies.
 * @pos: the cpufreq_frequency_table * to use as a loop cursor.
 * @table: the cpufreq_frequency_table * to iterate over.
 * @idx: the table entry currently being processed
 */

#define cpufreq_for_each_valid_entry_idx(pos, table, idx)               \
        cpufreq_for_each_entry_idx(pos, table, idx)                     \
                if (pos->frequency == CPUFREQ_ENTRY_INVALID)            \
                        continue;                                       \
                else

/**
 * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq
 *      frequency_table excluding CPUFREQ_ENTRY_INVALID and
 *      CPUFREQ_INEFFICIENT_FREQ frequencies.
 * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
 * @table: the &struct cpufreq_frequency_table to iterate over.
 * @idx: the table entry currently being processed.
 * @efficiencies: set to true to only iterate over efficient frequencies.
 */

#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies)     \
        cpufreq_for_each_valid_entry_idx(pos, table, idx)                       \
                if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ))    \
                        continue;                                               \
                else

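/*
 * Illustrative sketch (not part of the original header): counting boost
 * frequencies in a policy's table with the iterators above:
 *
 *      struct cpufreq_frequency_table *pos;
 *      unsigned int boost_count = 0;
 *
 *      cpufreq_for_each_valid_entry(pos, policy->freq_table)
 *              if (pos->flags & CPUFREQ_BOOST_FREQ)
 *                      boost_count++;
 *
 * Only valid entries are visited; CPUFREQ_ENTRY_INVALID slots are skipped
 * by the macro itself.
 */
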
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
                                    struct cpufreq_frequency_table *table);

int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
                                   struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);

int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
                                 unsigned int target_freq, unsigned int min,
                                 unsigned int max, unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
                                      unsigned int freq);

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);

#ifdef CONFIG_CPU_FREQ
bool cpufreq_boost_enabled(void);
int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state);

/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
                                              unsigned int target_freq,
                                              bool efficiencies)
{
        struct cpufreq_frequency_table *table = policy->freq_table;
        struct cpufreq_frequency_table *pos;
        unsigned int freq;
        int idx, best = -1;

        cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
                freq = pos->frequency;

                if (freq >= target_freq)
                        return idx;

                best = idx;
        }

        return best;
}

/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
                                              unsigned int target_freq,
                                              bool efficiencies)
{
        struct cpufreq_frequency_table *table = policy->freq_table;
        struct cpufreq_frequency_table *pos;
        unsigned int freq;
        int idx, best = -1;

        cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
                freq = pos->frequency;

                if (freq == target_freq)
                        return idx;

                if (freq > target_freq) {
                        best = idx;
                        continue;
                }

                /* No freq found above target_freq */
                if (best == -1)
                        return idx;

                return best;
        }

        return best;
}

static inline int find_index_l(struct cpufreq_policy *policy,
                               unsigned int target_freq,
                               unsigned int min, unsigned int max,
                               bool efficiencies)
{
        target_freq = clamp_val(target_freq, min, max);

        if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
                return cpufreq_table_find_index_al(policy, target_freq,
                                                   efficiencies);
        else
                return cpufreq_table_find_index_dl(policy, target_freq,
                                                   efficiencies);
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
                                             unsigned int target_freq,
                                             bool efficiencies)
{
        return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
}

/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
                                              unsigned int target_freq,
                                              bool efficiencies)
{
        struct cpufreq_frequency_table *table = policy->freq_table;
        struct cpufreq_frequency_table *pos;
        unsigned int freq;
        int idx, best = -1;

        cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
                freq = pos->frequency;

                if (freq == target_freq)
                        return idx;

                if (freq < target_freq) {
                        best = idx;
                        continue;
                }

                /* No freq found below target_freq */
                if (best == -1)
                        return idx;

                return best;
        }

        return best;
}

/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
                                              unsigned int target_freq,
                                              bool efficiencies)
{
        struct cpufreq_frequency_table *table = policy->freq_table;
        struct cpufreq_frequency_table *pos;
        unsigned int freq;
        int idx, best = -1;

        cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
                freq = pos->frequency;

                if (freq <= target_freq)
                        return idx;

                best = idx;
        }

        return best;
}

static inline int find_index_h(struct cpufreq_policy *policy,
                               unsigned int target_freq,
                               unsigned int min, unsigned int max,
                               bool efficiencies)
{
        target_freq = clamp_val(target_freq, min, max);

        if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
                return cpufreq_table_find_index_ah(policy, target_freq,
                                                   efficiencies);
        else
                return cpufreq_table_find_index_dh(policy, target_freq,
                                                   efficiencies);
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
                                             unsigned int target_freq,
                                             bool efficiencies)
{
        return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
}

/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
                                              unsigned int target_freq,
                                              bool efficiencies)
{
        struct cpufreq_frequency_table *table = policy->freq_table;
        struct cpufreq_frequency_table *pos;
        unsigned int freq;
        int idx, best = -1;

        cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
                freq = pos->frequency;

                if (freq == target_freq)
                        return idx;

                if (freq < target_freq) {
                        best = idx;
                        continue;
                }

                /* No freq found below target_freq */
                if (best == -1)
                        return idx;

                /* Choose the closest freq */
                if (target_freq - table[best].frequency > freq - target_freq)
                        return idx;

                return best;
        }

        return best;
}

/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
                                              unsigned int target_freq,
                                              bool efficiencies)
{
        struct cpufreq_frequency_table *table = policy->freq_table;
        struct cpufreq_frequency_table *pos;
        unsigned int freq;
        int idx, best = -1;

        cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
                freq = pos->frequency;

                if (freq == target_freq)
                        return idx;

                if (freq > target_freq) {
                        best = idx;
                        continue;
                }

                /* No freq found above target_freq */
                if (best == -1)
                        return idx;

                /* Choose the closest freq */
                if (table[best].frequency - target_freq > target_freq - freq)
                        return idx;

                return best;
        }

        return best;
}

static inline int find_index_c(struct cpufreq_policy *policy,
                               unsigned int target_freq,
                               unsigned int min, unsigned int max,
                               bool efficiencies)
{
        target_freq = clamp_val(target_freq, min, max);

        if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
                return cpufreq_table_find_index_ac(policy, target_freq,
                                                   efficiencies);
        else
                return cpufreq_table_find_index_dc(policy, target_freq,
                                                   efficiencies);
}

/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
                                             unsigned int target_freq,
                                             bool efficiencies)
{
        return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
}

static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
                                        unsigned int min, unsigned int max,
                                        int idx)
{
        unsigned int freq;

        if (idx < 0)
                return false;

        freq = policy->freq_table[idx].frequency;

        return freq == clamp_val(freq, min, max);
}

static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                                 unsigned int target_freq,
                                                 unsigned int min,
                                                 unsigned int max,
                                                 unsigned int relation)
{
        bool efficiencies = policy->efficiencies_available &&
                            (relation & CPUFREQ_RELATION_E);
        int idx;

        /* cpufreq_table_index_unsorted() has no use for this flag anyway */
        relation &= ~CPUFREQ_RELATION_E;

        if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
                return cpufreq_table_index_unsorted(policy, target_freq, min,
                                                    max, relation);
retry:
        switch (relation) {
        case CPUFREQ_RELATION_L:
                idx = find_index_l(policy, target_freq, min, max, efficiencies);
                break;
        case CPUFREQ_RELATION_H:
                idx = find_index_h(policy, target_freq, min, max, efficiencies);
                break;
        case CPUFREQ_RELATION_C:
                idx = find_index_c(policy, target_freq, min, max, efficiencies);
                break;
        default:
                WARN_ON_ONCE(1);
                return 0;
        }

        /* Limit frequency index to honor min and max */
        if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
                efficiencies = false;
                goto retry;
        }

        return idx;
}

static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
{
        struct cpufreq_frequency_table *pos;
        int count = 0;

        if (unlikely(!policy->freq_table))
                return 0;

        cpufreq_for_each_valid_entry(pos, policy->freq_table)
                count++;

        return count;
}

/**
 * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
 * @policy: the &struct cpufreq_policy containing the inefficient frequency
 * @frequency: the inefficient frequency
 *
 * The &struct cpufreq_policy must use a sorted frequency table
 *
 * Return: %0 on success or a negative errno code
 */

static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
                              unsigned int frequency)
{
        struct cpufreq_frequency_table *pos;

        /* Not supported */
        if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
                return -EINVAL;

        cpufreq_for_each_valid_entry(pos, policy->freq_table) {
                if (pos->frequency == frequency) {
                        pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
                        policy->efficiencies_available = true;
                        return 0;
                }
        }

        return -EINVAL;
}

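/*
 * Illustrative sketch (not part of the original header): a platform that
 * knows a given OPP is dominated by a higher one can mark it at policy
 * init time, which also makes CPUFREQ_RELATION_E requests meaningful:
 *
 *      if (cpufreq_table_set_inefficient(policy, 1000000))
 *              pr_debug("1 GHz not in the table or table unsorted\n");
 *
 * The 1000000 kHz value is made up for the example.
 */
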
static inline int parse_perf_domain(int cpu, const char *list_name,
                                    const char *cell_name,
                                    struct of_phandle_args *args)
{
        int ret;

        struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
        if (!cpu_np)
                return -ENODEV;

        ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
                                         args);
        if (ret < 0)
                return ret;
        return 0;
}

static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
                                                     const char *cell_name, struct cpumask *cpumask,
                                                     struct of_phandle_args *pargs)
{
        int cpu, ret;
        struct of_phandle_args args;

        ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
        if (ret < 0)
                return ret;

        cpumask_set_cpu(pcpu, cpumask);

        for_each_possible_cpu(cpu) {
                if (cpu == pcpu)
                        continue;

                ret = parse_perf_domain(cpu, list_name, cell_name, &args);
                if (ret < 0)
                        continue;

                if (of_phandle_args_equal(pargs, &args))
                        cpumask_set_cpu(cpu, cpumask);

                of_node_put(args.np);
        }

        return 0;
}
#else
static inline bool cpufreq_boost_enabled(void)
{
        return false;
}

static inline int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
        return -EOPNOTSUPP;
}

static inline int
cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
                              unsigned int frequency)
{
        return -EINVAL;
}

static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
                                                     const char *cell_name, struct cpumask *cpumask,
                                                     struct of_phandle_args *pargs)
{
        return -EOPNOTSUPP;
}
#endif

extern int arch_freq_get_on_cpu(int cpu);

#ifndef arch_set_freq_scale
static __always_inline
void arch_set_freq_scale(const struct cpumask *cpus,
                         unsigned long cur_freq,
                         unsigned long max_freq)
{
}
#endif

/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);

unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency);

bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask);

static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
        dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
                                  policy->related_cpus);
}
#endif /* _LINUX_CPUFREQ_H */