/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>
#include <linux/cpu.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor based on historic behavior
 * is applied to the estimate. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor; however, it uses
 * a set of factors, not just a single factor. This stems from the
 * realization that the ratio is dependent on the order of magnitude of the
 * expected duration; if we expect 500 milliseconds of idle time the
 * likelihood of getting an interrupt very early is much higher than if we
 * expect 50 microseconds of idle time. A second independent factor that has
 * a big impact on the actual factor is whether there is (disk) IO
 * outstanding or not. (As a special twist, we consider every sleep longer
 * than 50 milliseconds as perfect; there are no power gains for sleeping
 * longer than this.)
 *
 * For these two reasons we keep an array of 12 independent factors that
 * gets indexed based on the magnitude of the expected duration as well as
 * the "is IO outstanding" property.
 *
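 * To make the arithmetic concrete (an illustrative note with hypothetical
 * numbers, not from the original text): the factors are stored as
 * fixed-point values scaled by RESOLUTION * DECAY (1024 * 8 = 8192), so a
 * stored factor of 4096 means "past sleeps in this bucket lasted about
 * half of the predicted time" and halves the next estimate.
 *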
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
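 * As a hypothetical example: eight observed intervals clustered around
 * 1000 us with a few microseconds of jitter easily pass the variance
 * cutoff used by get_typical_interval() below, so ~1000 us is used as
 * the prediction even if the next timer event is much further away.
 *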
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a really
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to too high a performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 20 is added for each point of "per cpu load average" we have,
 * and a value of 10 is added for each process that is waiting for IO on
 * this CPU. (These values are experimentally determined and match the
 * arithmetic in performance_multiplier() below.)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a CPU-local, instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */

struct menu_device {
	int		last_state_idx;
	int		needs_update;
	int		tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	predicted_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one
	 * without. This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
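
/*
 * Worked example (hypothetical values): a predicted sleep of 250 us with
 * no iowaiters lands in bucket 2 (100 us <= 250 us < 1000 us); with at
 * least one task in iowait it starts from BUCKETS/2 = 6 and lands in
 * bucket 8.
 */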

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg(load);

	/* for IO wait tasks (per cpu!) we add 10 each */
	mult += 10 * nr_iowaiters;

	return mult;
}
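
/*
 * Worked example (hypothetical values): with a per-CPU load average of
 * 1.00, get_loadavg() returns 10, so with two iowaiters the multiplier
 * is 1 + 2 * 10 + 10 * 2 = 41. A C state with a 50 us exit latency then
 * needs a predicted idle time above 50 * 41 = 2050 us to stay a
 * candidate in menu_select().
 */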

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}
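
/*
 * Walk-through (hypothetical data): intervals {1000, 1002, 998, 1001,
 * 999, 1000, 50000, 1000} fail the first pass because the 50000 us
 * outlier inflates the variance. The threshold is then lowered to
 * max - 1 = 49999 and the outlier is dropped; the remaining 7 samples
 * (still more than 3/4 of INTERVALS) have a variance well under
 * 400 us^2, so roughly 1000 us is returned.
 */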

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	struct device *device = get_cpu_device(dev->cpu);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int first_idx;
	int idx;
	unsigned int interactivity_req;
	unsigned int expected_interval;
	unsigned long nr_iowaiters, cpu_load;
	int resume_latency = dev_pm_qos_raw_read_value(device);
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	if (resume_latency < latency_req &&
	    resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
		latency_req = resume_latency;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		*stop_tick = false;
		return 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					data->correction_factor[data->bucket],
					RESOLUTION * DECAY);
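	/*
	 * Scaling example (hypothetical values): with next_timer_us = 10000
	 * and a stored correction factor of 4096 out of RESOLUTION * DECAY
	 * = 8192, the prediction becomes 10000 * 4096 / 8192 = 5000 us.
	 */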

	expected_interval = get_typical_interval(data);
	expected_interval = min(expected_interval, data->next_timer_us);

	first_idx = 0;
	if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
		struct cpuidle_state *s = &drv->states[1];
		unsigned int polling_threshold;

		/*
		 * We want to default to C1 (hlt), not to busy polling
		 * unless the timer is happening really really soon, or
		 * C1's exit latency exceeds the user configured limit.
		 */
		polling_threshold = max_t(unsigned int, 20, s->target_residency);
		if (data->next_timer_us > polling_threshold &&
		    latency_req > s->exit_latency && !s->disabled &&
		    !dev->states_usage[1].disable)
			first_idx = 1;
	}

	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	data->predicted_us = min(data->predicted_us, expected_interval);

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it. In that case say we might mispredict and try
		 * to force the CPU into a state for which we would have stopped
		 * the tick, unless a timer is going to expire really soon
		 * anyway.
		 */
		if (data->predicted_us < TICK_USEC)
			data->predicted_us = min_t(unsigned int, TICK_USEC,
						   ktime_to_us(delta_next));
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	expected_interval = data->predicted_us;
	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = first_idx; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (idx == -1)
			idx = i; /* first enabled state */
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			expected_interval = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	    expected_interval < TICK_USEC) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (!tick_nohz_tick_stopped() && idx > 0 &&
		    drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	data->last_state_idx = idx;

	return data->last_state_idx;
}
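
/*
 * Selection example (hypothetical state table): with predicted_us = 120
 * and latency_req = 50, a state with target_residency = 100 and
 * exit_latency = 30 passes both checks; a deeper state with
 * target_residency = 200 fails the residency check, and one with
 * exit_latency = 80 fails the latency check.
 */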

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of the entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use the measured value anyway if it is short; if it is long,
	 * we truncate it to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else {
		/* measured value */
		measured_us = cpuidle_get_last_residency(dev);

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}
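
/*
 * Update example (hypothetical values): starting from the unity factor
 * 8192, if the timer predicted 10000 us but the CPU only slept 5000 us,
 * the new factor is 8192 - 8192/8 + 1024 * 5000 / 10000 = 7680; the
 * running average decays 1/8 of the way from 1.0 toward the observed
 * ratio of 0.5 on each update.
 */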

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * If the correction factor is 0 (e.g. first time init or cpu
	 * hotplug etc.), we actually want to start out with a unity factor.
	 */
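	/*
	 * Unity here is RESOLUTION * DECAY (1024 * 8 = 8192), the same
	 * divisor menu_select() uses when scaling next_timer_us.
	 */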
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);