/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * the scaling multiplication may overflow on 32-bit platforms.
 * In that case, #define RESOLUTION as ULL to get a 64-bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from PM QoS infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding or
 * not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
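 * For example (hypothetical numbers): a USB mouse reporting at 125 Hz
 * yields observed intervals of roughly 8000 us each; their standard
 * deviation is tiny relative to the average, so the detector predicts
 * 8000 us no matter how far away the next timer event is.
 *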
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to too high a performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Currently a single factor determines this multiplier:
 * a value of 10 is added for each process that is waiting for
 * IO on this CPU.
 * (This value was determined experimentally.)
 *
 * The iowait value gives a CPU-local, instantaneous input to the
 * decision. The iowait factor may look low, but realize that iowait
 * is also already represented in the system load average.
 */
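
/*
 * Illustrative example (hypothetical numbers): with 2 tasks in iowait on
 * this CPU the multiplier is 1 + 10 * 2 = 21, so a C state with a 50 us
 * exit latency only remains a candidate when the predicted idle duration
 * exceeds about 21 * 50 = 1050 us.
 */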

struct menu_device {
        int             last_state_idx;
        int             needs_update;
        int             tick_wakeup;

        unsigned int    next_timer_us;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
        int             interval_ptr;
};

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
        int bucket = 0;

        /*
         * We keep two groups of stats; one with IO pending, one
         * without. This allows us to calculate
         * E(duration)|iowait
         */
        if (nr_iowaiters)
                bucket = BUCKETS/2;

        if (duration < 10)
                return bucket;
        if (duration < 100)
                return bucket + 1;
        if (duration < 1000)
                return bucket + 2;
        if (duration < 10000)
                return bucket + 3;
        if (duration < 100000)
                return bucket + 4;
        return bucket + 5;
}
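
/*
 * For example, a 250 us expected duration with IO outstanding lands in
 * bucket BUCKETS/2 + 2 = 8, while the same duration without IO pending
 * lands in bucket 2, keeping the two correction histories separate.
 */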

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters)
{
        /* for IO wait tasks (per cpu!) we add 10x each */
        return 1 + 10 * nr_iowaiters;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data,
                                         unsigned int predicted_us)
{
        int i, divisor;
        unsigned int min, max, thresh, avg;
        uint64_t sum, variance;

        thresh = UINT_MAX; /* Discard outliers above this value */

again:

        /* First calculate the average of past intervals */
        min = UINT_MAX;
        max = 0;
        sum = 0;
        divisor = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        sum += value;
                        divisor++;
                        if (value > max)
                                max = value;

                        if (value < min)
                                min = value;
                }
        }

        /*
         * If the result of the computation is going to be discarded anyway,
         * avoid the computation altogether.
         */
        if (min >= predicted_us)
                return UINT_MAX;

        if (divisor == INTERVALS)
                avg = sum >> INTERVAL_SHIFT;
        else
                avg = div_u64(sum, divisor);

        /* Then try to determine variance */
        variance = 0;
        for (i = 0; i < INTERVALS; i++) {
                unsigned int value = data->intervals[i];
                if (value <= thresh) {
                        int64_t diff = (int64_t)value - avg;
                        variance += diff * diff;
                }
        }
        if (divisor == INTERVALS)
                variance >>= INTERVAL_SHIFT;
        else
                do_div(variance, divisor);

        /*
         * The typical interval is obtained when standard deviation is
         * small (stddev <= 20 us, variance <= 400 us^2) or standard
         * deviation is small compared to the average interval (avg >
         * 6*stddev, avg^2 > 36*variance). The average is smaller than
         * UINT_MAX aka U32_MAX, so computing its square does not
         * overflow a u64. We simply reject this candidate average if
         * the standard deviation is greater than 715 s (which is
         * rather unlikely).
         *
         * Use this result only if there is no timer to wake us up sooner.
         */
        if (likely(variance <= U64_MAX/36)) {
                if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
                    || variance <= 400) {
                        return avg;
                }
        }

        /*
         * If we have outliers to the upside in our distribution, discard
         * those by setting the threshold to exclude these outliers, then
         * calculate the average and standard deviation again. Once we get
         * down to the bottom 3/4 of our samples, stop excluding samples.
         *
         * This can deal with workloads that have long pauses interspersed
         * with sporadic activity with a bunch of short pauses.
         */
        if ((divisor * 4) <= INTERVALS * 3)
                return UINT_MAX;

        thresh = max - 1;
        goto again;
}
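
/*
 * Worked example (hypothetical samples): for intervals of
 * {1000, 1010, 990, 1000, 1005, 995, 1000, 100000} us the single large
 * outlier inflates the variance, so the first pass fails both tests.
 * The threshold then drops below 100000, the outlier is excluded, and
 * the remaining 7 samples (more than 3/4 of INTERVALS) easily satisfy
 * avg > 6 * stddev, so roughly 1000 us is returned (provided the
 * caller's predicted_us exceeds the smallest sample).
 */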

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                       bool *stop_tick)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        int latency_req = cpuidle_governor_latency_req(dev->cpu);
        int i;
        int idx;
        unsigned int interactivity_req;
        unsigned int predicted_us;
        unsigned long nr_iowaiters;
        ktime_t delta_next;

        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }

        /* determine the expected residency time, round up */
        data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

        nr_iowaiters = nr_iowait_cpu(dev->cpu);
        data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

        if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
            ((data->next_timer_us < drv->states[1].target_residency ||
              latency_req < drv->states[1].exit_latency) &&
             !drv->states[0].disabled && !dev->states_usage[0].disable)) {
                /*
                 * In this case state[0] will be used no matter what, so return
                 * it right away and keep the tick running.
                 */
                *stop_tick = false;
                return 0;
        }

        /*
         * Force the result of the multiplication to be 64 bits even if both
         * operands are 32 bits.
         * Make sure to round up for half microseconds.
         */
        predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
                                             data->correction_factor[data->bucket],
                                             RESOLUTION * DECAY);
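        /*
         * Example with made-up numbers: next_timer_us = 800 and a stored
         * factor of 4096 (0.5 in RESOLUTION * DECAY = 8192 fixed point)
         * yield DIV_ROUND_CLOSEST_ULL(800 * 4096, 8192) = 400 us here.
         */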
        /*
         * Use the lowest expected idle interval to pick the idle state.
         */
        predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));

        if (tick_nohz_tick_stopped()) {
                /*
                 * If the tick is already stopped, the cost of a possible short
                 * idle duration misprediction is much higher, because the CPU
                 * may be stuck in a shallow idle state for a long time as a
                 * result of it. In that case say we might mispredict and use
                 * the known time till the closest timer event for the idle
                 * state selection.
                 */
                if (predicted_us < TICK_USEC)
                        predicted_us = ktime_to_us(delta_next);
        } else {
                /*
                 * Use the performance multiplier and the user-configurable
                 * latency_req to determine the maximum exit latency.
                 */
                interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
                if (latency_req > interactivity_req)
                        latency_req = interactivity_req;
        }
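
        /*
         * E.g. (made-up numbers): with the tick running, predicted_us = 2200
         * and one iowaiter give interactivity_req = 2200 / 11 = 200 us, so a
         * PM QoS latency_req of 500 us is tightened to 200 us above.
         */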

        /*
         * Find the idle state with the lowest power while satisfying
         * our constraints.
         */
        idx = -1;
        for (i = 0; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable)
                        continue;

                if (idx == -1)
                        idx = i; /* first enabled state */

                if (s->target_residency > predicted_us) {
                        /*
                         * Use a physical idle state, not busy polling, unless
                         * a timer is going to trigger soon enough.
                         */
                        if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
                            s->exit_latency <= latency_req &&
                            s->target_residency <= data->next_timer_us) {
                                predicted_us = s->target_residency;
                                idx = i;
                                break;
                        }
                        if (predicted_us < TICK_USEC)
                                break;

                        if (!tick_nohz_tick_stopped()) {
                                /*
                                 * If the state selected so far is shallow,
                                 * waking up early won't hurt, so retain the
                                 * tick in that case and let the governor run
                                 * again in the next iteration of the loop.
                                 */
                                predicted_us = drv->states[idx].target_residency;
                                break;
                        }

                        /*
                         * If the state selected so far is shallow and this
                         * state's target residency matches the time till the
                         * closest timer event, select this one to avoid getting
                         * stuck in the shallow one for too long.
                         */
                        if (drv->states[idx].target_residency < TICK_USEC &&
                            s->target_residency <= ktime_to_us(delta_next))
                                idx = i;

                        return idx;
                }
                if (s->exit_latency > latency_req) {
                        /*
                         * If we break out of the loop for latency reasons, use
                         * the target residency of the selected state as the
                         * expected idle duration so that the tick is retained
                         * as long as that target residency is low enough.
                         */
                        predicted_us = drv->states[idx].target_residency;
                        break;
                }
                idx = i;
        }

        if (idx == -1)
                idx = 0; /* No states enabled. Must use 0. */

        /*
         * Don't stop the tick if the selected state is a polling one or if the
         * expected idle duration is shorter than the tick period length.
         */
        if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
             predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
                unsigned int delta_next_us = ktime_to_us(delta_next);

                *stop_tick = false;

                if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
                        /*
                         * The tick is not going to be stopped and the target
                         * residency of the state to be returned is not within
                         * the time until the next timer event including the
                         * tick, so try to correct that.
                         */
                        for (i = idx - 1; i >= 0; i--) {
                                if (drv->states[i].disabled ||
                                    dev->states_usage[i].disable)
                                        continue;

                                idx = i;
                                if (drv->states[i].target_residency <= delta_next_us)
                                        break;
                        }
                }
        }

        return idx;
}
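
/*
 * A worked pass through menu_select(), with made-up driver numbers: three
 * enabled states with (target_residency, exit_latency) of (1, 1), (20, 10)
 * and (200, 80) us, predicted_us = 150 and a permissive latency_req. The
 * loop advances idx to state 1, rejects state 2 because its 200 us target
 * residency exceeds the 150 us prediction, and, since 150 us is below the
 * tick period, the tick is left running for the shallow state.
 */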

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);

        data->last_state_idx = index;
        data->needs_update = 1;
        data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = this_cpu_ptr(&menu_devices);
        int last_idx = data->last_state_idx;
        struct cpuidle_state *target = &drv->states[last_idx];
        unsigned int measured_us;
        unsigned int new_factor;

        /*
         * Try to figure out how much time passed between entry to low
         * power state and occurrence of the wakeup event.
         *
         * If the entered idle state didn't support residency measurements,
         * we use them anyway if they are short, and if long,
         * truncate to the whole expected time.
         *
         * Any measured amount of time will include the exit latency.
         * Since we are interested in when the wakeup began, not when it
         * was completed, we must subtract the exit latency. However, if
         * the measured amount of time is less than the exit latency,
         * assume the state was never reached and the exit latency is 0.
         */

        if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
                /*
                 * The nohz code said that there wouldn't be any events within
                 * the tick boundary (if the tick was stopped), but the idle
                 * duration predictor had a differing opinion. Since the CPU
                 * was woken up by a tick (that wasn't stopped after all), the
                 * predictor was not quite right, so assume that the CPU could
                 * have been idle long (but not forever) to help the idle
                 * duration predictor do a better job next time.
                 */
                measured_us = 9 * MAX_INTERESTING / 10;
        } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
                   dev->poll_time_limit) {
                /*
                 * The CPU exited the "polling" state due to a time limit, so
                 * the idle duration prediction leading to the selection of that
                 * state was inaccurate. If a better prediction had been made,
                 * the CPU might have been woken up from idle by the next timer.
                 * Assume that to be the case.
                 */
                measured_us = data->next_timer_us;
        } else {
                /* measured value */
                measured_us = dev->last_residency;

                /* Deduct exit latency */
                if (measured_us > 2 * target->exit_latency)
                        measured_us -= target->exit_latency;
                else
                        measured_us /= 2;
        }

        /* Make sure our coefficients do not exceed unity */
        if (measured_us > data->next_timer_us)
                measured_us = data->next_timer_us;

        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;

        if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
                new_factor += RESOLUTION * measured_us / data->next_timer_us;
        else
                /*
                 * we were idle so long that we count it as a perfect
                 * prediction
                 */
                new_factor += RESOLUTION;

        /*
         * We don't want 0 as factor; we always want at least
         * a tiny bit of estimated time. Fortunately, due to rounding,
         * new_factor will stay nonzero regardless of measured_us values
         * and the compiler can eliminate this test as long as DECAY > 1.
         */
        if (DECAY == 1 && unlikely(new_factor == 0))
                new_factor = 1;

        data->correction_factor[data->bucket] = new_factor;
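
        /*
         * Decaying-average example (hypothetical values): a bucket holding
         * unity (8192) that keeps seeing wakeups at half of the next-timer
         * estimate moves 8192 -> 7168 + 512 = 7680 -> 7232 -> ... and
         * converges on 4096, i.e. a correction ratio of 0.5.
         */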

        /* update the repeating-pattern data */
        data->intervals[data->interval_ptr++] = measured_us;
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev)
{
        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
        int i;

        memset(data, 0, sizeof(struct menu_device));

        /*
         * if the correction factor is 0 (e.g. first time init or cpu
         * hotplug etc), we actually want to start out with a unity factor.
         */
        for (i = 0; i < BUCKETS; i++)
                data->correction_factor[i] = RESOLUTION * DECAY;

        return 0;
}

static struct cpuidle_governor menu_governor = {
        .name =         "menu",
        .rating =       20,
        .enable =       menu_enable_device,
        .select =       menu_select,
        .reflect =      menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
        return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);