/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}

void disable_cpuidle(void)
{
        off = 1;
}

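/**
 * cpuidle_not_available - check if cpuidle is unusable on this CPU
 * @drv: cpuidle driver for the CPU.
 * @dev: cpuidle device for the CPU.
 *
 * Return 'true' when the framework is off or not initialized yet, or when
 * no driver or device is set up for this CPU (or the device is disabled).
 */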
bool cpuidle_not_available(struct cpuidle_driver *drv,
                           struct cpuidle_device *dev)
{
        return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if there is no driver.
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int i;

        if (!drv)
                return -ENODEV;

        /* Find lowest-power state that supports long-term idle */
        for (i = drv->state_count - 1; i >= 0; i--)
                if (drv->states[i].enter_dead)
                        return drv->states[i].enter_dead(dev, i);

        return -ENODEV;
}

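/*
 * find_deepest_state - scan the state table for the deepest usable state.
 *
 * Of the states that are not disabled, do not exceed @max_latency, carry
 * none of @forbidden_flags and (when @s2idle is set) provide an
 * ->enter_s2idle callback, pick the one with the greatest exit latency.
 * State 0 is returned when nothing deeper qualifies.
 */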
static int find_deepest_state(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev,
                              unsigned int max_latency,
                              unsigned int forbidden_flags,
                              bool s2idle)
{
        unsigned int latency_req = 0;
        int i, ret = 0;

        for (i = 1; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable || s->exit_latency <= latency_req
                    || s->exit_latency > max_latency
                    || (s->flags & forbidden_flags)
                    || (s2idle && !s->enter_s2idle))
                        continue;

                latency_req = s->exit_latency;
                ret = i;
        }
        return ret;
}

/**
 * cpuidle_use_deepest_state - Set/clear governor override flag.
 * @enable: New value of the flag.
 *
 * If @enable is set, make the current CPU use the deepest idle state
 * (overriding the governors) going forward; otherwise clear the override.
 */
void cpuidle_use_deepest_state(bool enable)
{
        struct cpuidle_device *dev;

        preempt_disable();
        dev = cpuidle_get_device();
        if (dev)
                dev->use_deepest_state = enable;
        preempt_enable();
}

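/*
 * Illustrative use (a sketch, not taken from this file): callers that
 * force a CPU into idle, e.g. idle injection, are expected to bracket
 * the forced-idle period with this override:
 *
 *      cpuidle_use_deepest_state(true);
 *      ...  (forced idle period, governors bypassed)
 *      cpuidle_use_deepest_state(false);
 */
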
/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                               struct cpuidle_device *dev)
{
        return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}

#ifdef CONFIG_SUSPEND
static void enter_s2idle_proper(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev, int index)
{
        ktime_t time_start, time_end;

        time_start = ns_to_ktime(local_clock());

        /*
         * trace_suspend_resume() called by tick_freeze() for the last CPU
         * executing it contains RCU usage regarded as invalid in the idle
         * context, so tell RCU about that.
         */
        RCU_NONIDLE(tick_freeze());
        /*
         * The state used here cannot be a "coupled" one, because the "coupled"
         * cpuidle mechanism enables interrupts and doing that with timekeeping
         * suspended is generally unsafe.
         */
        stop_critical_timings();
        drv->states[index].enter_s2idle(dev, drv, index);
        WARN_ON(!irqs_disabled());
        /*
         * timekeeping_resume() that will be called by tick_unfreeze() for the
         * first CPU executing it calls functions containing RCU read-side
         * critical sections, so tell RCU about that.
         */
        RCU_NONIDLE(tick_unfreeze());
        start_critical_timings();

        time_end = ns_to_ktime(local_clock());

        dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
        dev->states_usage[index].s2idle_usage++;
}

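/*
 * Note: s2idle_time and s2idle_usage only account for suspend-to-idle
 * entries made through enter_s2idle_proper(); ordinary idle entries are
 * accounted in cpuidle_enter_state() below.
 */
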
/**
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with the tick frozen.
 */
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        int index;

        /*
         * Find the deepest state with ->enter_s2idle present, which guarantees
         * that interrupts won't be enabled when it exits and allows the tick to
         * be frozen safely.
         */
        index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
        if (index > 0)
                enter_s2idle_proper(drv, dev, index);

        return index;
}
#endif /* CONFIG_SUSPEND */

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                        int index)
{
        int entered_state;

        struct cpuidle_state *target_state = &drv->states[index];
        bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
        ktime_t time_start, time_end;

        /*
         * Tell the time framework to switch to a broadcast timer because our
         * local timer will be shut down. If a local timer is used from another
         * CPU as a broadcast timer, this call may fail if it is not available.
         */
        if (broadcast && tick_broadcast_enter()) {
                index = find_deepest_state(drv, dev, target_state->exit_latency,
                                           CPUIDLE_FLAG_TIMER_STOP, false);
                if (index < 0) {
                        default_idle_call();
                        return -EBUSY;
                }
                target_state = &drv->states[index];
                broadcast = false;
        }

        /* Take note of the planned idle state. */
        sched_idle_set_state(target_state);

        trace_cpu_idle_rcuidle(index, dev->cpu);
        time_start = ns_to_ktime(local_clock());

        stop_critical_timings();
        entered_state = target_state->enter(dev, drv, index);
        start_critical_timings();

        sched_clock_idle_wakeup_event();
        time_end = ns_to_ktime(local_clock());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

        /* The cpu is no longer idle or about to enter idle. */
        sched_idle_set_state(NULL);

        if (broadcast) {
                if (WARN_ON_ONCE(!irqs_disabled()))
                        local_irq_disable();

                tick_broadcast_exit();
        }

        if (!cpuidle_state_is_coupled(drv, index))
                local_irq_enable();

        if (entered_state >= 0) {
                s64 diff, delay = drv->states[entered_state].exit_latency;
                int i;

                /*
                 * Update cpuidle counters.
                 * This could be moved into the driver's enter routine, but
                 * that would result in multiple copies of the same code.
                 */
                diff = ktime_us_delta(time_end, time_start);
                if (diff > INT_MAX)
                        diff = INT_MAX;

                dev->last_residency = (int)diff;
                dev->states_usage[entered_state].time += dev->last_residency;
                dev->states_usage[entered_state].usage++;

                if (diff < drv->states[entered_state].target_residency) {
                        for (i = entered_state - 1; i >= 0; i--) {
                                if (drv->states[i].disabled ||
                                    dev->states_usage[i].disable)
                                        continue;

                                /* Shallower states are enabled, so update. */
                                dev->states_usage[entered_state].above++;
                                break;
                        }
                } else if (diff > delay) {
                        for (i = entered_state + 1; i < drv->state_count; i++) {
                                if (drv->states[i].disabled ||
                                    dev->states_usage[i].disable)
                                        continue;

                                /*
                                 * Update if a deeper state would have been a
                                 * better match for the observed idle duration.
                                 */
                                if (diff - delay >= drv->states[i].target_residency)
                                        dev->states_usage[entered_state].below++;

                                break;
                        }
                }
        } else {
                dev->last_residency = 0;
        }

        return entered_state;
}

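/*
 * Worked example of the above/below accounting (illustrative numbers):
 * assume state 2 has target_residency = 500us and exit_latency = 100us,
 * and both a shallower and a deeper enabled state exist. An observed idle
 * of 300us (below the target residency) bumps above[2]: a shallower state
 * would have been a better fit. An observed idle of 2000us bumps below[2]
 * provided 2000us - 100us covers the deeper state's target residency.
 */
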
/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 * @stop_tick: indication on whether or not to stop the tick
 *
 * Returns the index of the idle state. The return value must not be negative.
 *
 * The memory location pointed to by @stop_tick is expected to be written the
 * 'false' boolean value if the scheduler tick should not be stopped before
 * entering the returned state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                   bool *stop_tick)
{
        return cpuidle_curr_governor->select(drv, dev, stop_tick);
}

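/*
 * Illustrative sketch (hypothetical governor, not part of this file): a
 * minimal ->select() honouring the contract above always returns a valid
 * index and says whether the tick may be stopped:
 *
 *      static int dummy_select(struct cpuidle_driver *drv,
 *                              struct cpuidle_device *dev, bool *stop_tick)
 *      {
 *              *stop_tick = false;     (keep the tick running)
 *              return 0;               (shallowest state)
 *      }
 */
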
/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index in the idle state table, or a negative error code.
 * The error code depends on the backend driver.
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                  int index)
{
        int ret = 0;

        /*
         * Store the next hrtimer, which becomes either next tick or the next
         * timer event, whatever expires first. Additionally, to make this data
         * useful for consumers outside cpuidle, we rely on the governor's
         * ->select() callback having decided whether or not to stop the tick.
         */
        WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());

        if (cpuidle_state_is_coupled(drv, index))
                ret = cpuidle_enter_state_coupled(dev, drv, index);
        else
                ret = cpuidle_enter_state(dev, drv, index);

        WRITE_ONCE(dev->next_hrtimer, 0);
        return ret;
}

/**
 * cpuidle_reflect - tell the underlying governor which state we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
        if (cpuidle_curr_governor->reflect && index >= 0)
                cpuidle_curr_governor->reflect(dev, index);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                wake_up_all_idle_cpus();
        }

        /*
         * Make sure external observers (such as the scheduler)
         * are done looking at pointed idle states.
         */
        synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv;

        if (!dev)
                return -EINVAL;

        if (dev->enabled)
                return 0;

        if (!cpuidle_curr_governor)
                return -EIO;

        drv = cpuidle_get_cpu_driver(dev);

        if (!drv)
                return -EIO;

        if (!dev->registered)
                return -EINVAL;

        ret = cpuidle_add_device_sysfs(dev);
        if (ret)
                return ret;

        if (cpuidle_curr_governor->enable) {
                ret = cpuidle_curr_governor->enable(drv, dev);
                if (ret)
                        goto fail_sysfs;
        }

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_device_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

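/*
 * Illustrative call sequence for an external user, per the rule stated in
 * the kernel-doc above:
 *
 *      cpuidle_pause_and_lock();
 *      ret = cpuidle_enable_device(dev);
 *      cpuidle_resume_and_unlock();
 */
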
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!dev || !dev->enabled)
                return;

        if (!drv || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(drv, dev);

        cpuidle_remove_device_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(drv->owner);

        dev->registered = 0;
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
        memset(dev->states_usage, 0, sizeof(dev->states_usage));
        dev->last_residency = 0;
        dev->next_hrtimer = 0;
}

/**
 * __cpuidle_register_device - internal register function called before the
 * register and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!try_module_get(drv->owner))
                return -EINVAL;

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);

        ret = cpuidle_coupled_register_device(dev);
        if (ret)
                __cpuidle_unregister_device(dev);
        else
                dev->registered = 1;

        return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret = -EBUSY;

        if (!dev)
                return -EINVAL;

        mutex_lock(&cpuidle_lock);

        if (dev->registered)
                goto out_unlock;

        __cpuidle_device_init(dev);

        ret = __cpuidle_register_device(dev);
        if (ret)
                goto out_unlock;

        ret = cpuidle_add_sysfs(dev);
        if (ret)
                goto out_unregister;

        ret = cpuidle_enable_device(dev);
        if (ret)
                goto out_sysfs;

        cpuidle_install_idle_handler();

out_unlock:
        mutex_unlock(&cpuidle_lock);

        return ret;

out_sysfs:
        cpuidle_remove_sysfs(dev);
out_unregister:
        __cpuidle_unregister_device(dev);
        goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        if (!dev || dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(dev);

        __cpuidle_unregister_device(dev);

        cpuidle_coupled_unregister_device(dev);

        cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and its devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
        int cpu;
        struct cpuidle_device *device;

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                cpuidle_unregister_device(device);
        }

        cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function covers the common
 * initialization pattern used by the arch-specific drivers. The devices
 * are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
                     const struct cpumask *const coupled_cpus)
{
        int ret, cpu;
        struct cpuidle_device *device;

        ret = cpuidle_register_driver(drv);
        if (ret) {
                pr_err("failed to register cpuidle driver\n");
                return ret;
        }

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
                /*
                 * On multiplatform ARM kernels, the coupled idle states could
                 * be enabled in the kernel even if the cpuidle driver does not
                 * use them. Note, coupled_cpus is a struct copy.
                 */
                if (coupled_cpus)
                        device->coupled_cpus = *coupled_cpus;
#endif
                ret = cpuidle_register_device(device);
                if (!ret)
                        continue;

                pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

                cpuidle_unregister(drv);
                break;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);

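/*
 * Illustrative sketch (hypothetical "acme" driver, not part of this file):
 * the common pattern cpuidle_register() captures is a driver declaring its
 * state table and registering everything in one call:
 *
 *      static struct cpuidle_driver acme_idle_driver = {
 *              .name           = "acme_idle",
 *              .owner          = THIS_MODULE,
 *              .states         = { ... },
 *              .state_count    = 2,
 *      };
 *
 *      ret = cpuidle_register(&acme_idle_driver, NULL);
 */
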
#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                                  unsigned long l, void *v)
{
        wake_up_all_idle_cpus();
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

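/*
 * Illustrative sketch (assuming the PM_QOS_CPU_DMA_LATENCY request API that
 * accompanies the notifier interface used above): a driver that cannot
 * tolerate deep C-state exit latency adds a request, which triggers
 * cpuidle_latency_notify() and kicks all idle CPUs:
 *
 *      struct pm_qos_request req;
 *
 *      pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, 20);
 *      ...
 *      pm_qos_remove_request(&req);
 */
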
/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

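/*
 * Both parameters below are read-only module parameters; with cpuidle built
 * in they are set from the kernel command line, e.g. "cpuidle.off=1" or
 * "cpuidle.governor=menu" (assuming the menu governor is available).
 */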
module_param(off, int, 0444);
module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444);
core_initcall(cpuidle_init);