/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * one that is based on historic behavior. For example, if in the past the
 * actual duration always was 50% of the next timer tick, the correction
 * factor will be 0.5.
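 *
 * (Illustrative numbers, not from measurements: the factor is stored in
 * fixed point as ratio * RESOLUTION * DECAY, so a ratio of 0.5 is kept
 * as 4096, and an expected 1000 usec of idle is then corrected to
 * 1000 * 4096 / 8192 = 500 usec of predicted idle time.)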
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding
 * or not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: we track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
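 *
 * (Illustrative example: if the last 8 intervals all fall between 2900 and
 * 3100 usec, the average is ~3000 usec and the standard deviation is well
 * within a sixth of that, so ~3000 usec is used as the prediction even if
 * the next timer event is a full second away.)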
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * if the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * 2 points are added for each tenth of a point of "per CPU load average"
 * we have, and 10 points are added for each process that is waiting for
 * IO on this CPU.
 * (these weights are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu-local, instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
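 *
 * (Illustrative numbers: with a per-CPU load average of 2.50, returned as
 * 25 by get_loadavg() below, and one task waiting for IO, the multiplier
 * is 1 + 2 * 25 + 10 * 1 = 61, so a C state with a 100 usec exit latency
 * needs a predicted idle time of at least 6100 usec to be a candidate.)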
 *
 */

struct menu_device {
        int             last_state_idx;
        int             needs_update;

        unsigned int    expected_us;
        u64             predicted_us;
        unsigned int    exit_us;
        unsigned int    bucket;
        u64             correction_factor[BUCKETS];
        u32             intervals[INTERVALS];
        int             interval_ptr;
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

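/*
 * Return the current per-CPU load average scaled to tenths:
 * e.g. a load average of 2.50 yields 25.
 */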
static int get_loadavg(void)
{
        unsigned long this = this_cpu_load();

        return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

static inline int which_bucket(unsigned int duration)
{
        int bucket = 0;

        /*
         * We keep two groups of stats; one with IO pending, one
         * without.
         * This allows us to calculate
         * E(duration)|iowait
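         *
         * Illustrative mapping: buckets 0-5 cover the no-IO case by
         * decade of expected duration (<10us, <100us, ..., >=100000us)
         * and buckets 6-11 cover the same decades with IO pending.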
         */
        if (nr_iowait_cpu(smp_processor_id()))
                bucket = BUCKETS/2;

        if (duration < 10)
                return bucket;
        if (duration < 100)
                return bucket + 1;
        if (duration < 1000)
                return bucket + 2;
        if (duration < 10000)
                return bucket + 3;
        if (duration < 100000)
                return bucket + 4;
        return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
        int mult = 1;

        /* for higher loadavg, we are more reluctant */
        mult += 2 * get_loadavg();

        /* for IO wait tasks (per cpu!) we add 10 each */
        mult += 10 * nr_iowait_cpu(smp_processor_id());

        return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
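/* For example, div_round64(5, 2) returns 3 where plain truncation gives 2. */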
static u64 div_round64(u64 dividend, u32 divisor)
{
        return div_u64(dividend + (divisor / 2), divisor);
}

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is, then use the
 * average of these 8 points as the estimated value.
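 *
 * (Illustrative run: intervals {200, 210, 190, 205, 195, 215, 5000, 198}
 * fail the first pass because the 5000 usec outlier inflates the standard
 * deviation; the retry below excludes the maximum, and the remaining 7
 * samples give an average of ~202 usec with a small standard deviation,
 * which becomes the prediction.)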
 */
static void get_typical_interval(struct menu_device *data)
{
        int i = 0, divisor = 0;
        uint64_t max = 0, avg = 0, stddev = 0;
        int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */

again:
        /* first calculate average and standard deviation of the past */
        max = avg = divisor = stddev = 0;
        for (i = 0; i < INTERVALS; i++) {
                int64_t value = data->intervals[i];
                if (value <= thresh) {
                        avg += value;
                        divisor++;
                        if (value > max)
                                max = value;
                }
        }
        do_div(avg, divisor);

        for (i = 0; i < INTERVALS; i++) {
                int64_t value = data->intervals[i];
                if (value <= thresh) {
                        int64_t diff = value - avg;
                        stddev += diff * diff;
                }
        }
        do_div(stddev, divisor);
        stddev = int_sqrt(stddev);
        /*
         * The typical interval is obtained when the standard deviation is
         * either small in absolute terms (<= 20 usec) or small compared to
         * the average interval (at least a factor 6 below it).
         *
         * Use this result only if there is no timer to wake us up sooner.
         */
        if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
                                                        || stddev <= 20) {
                if (data->expected_us > avg)
                        data->predicted_us = avg;
                return;
        }

        /*
         * If we have outliers to the upside in our distribution, discard
         * those by setting the threshold to exclude these outliers, then
         * calculate the average and standard deviation again. Once we get
         * down to the bottom 3/4 of our samples, stop excluding samples.
         *
         * This can deal with workloads that have long pauses interspersed
         * with sporadic activity with a bunch of short pauses.
         */
        if ((divisor * 4) <= INTERVALS * 3)
                return;

        thresh = max - 1;
        goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
        int multiplier;
        struct timespec t;

        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }

        data->last_state_idx = 0;
        data->exit_us = 0;

        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
                return 0;

        /* determine the expected residency time, round up */
        t = ktime_to_timespec(tick_nohz_get_sleep_length());
        data->expected_us =
                t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

        data->bucket = which_bucket(data->expected_us);

        multiplier = performance_multiplier();

        /*
         * if the correction factor is 0 (e.g. first time init or cpu
         * hotplug etc), we actually want to start out with a unity factor.
         */
        if (data->correction_factor[data->bucket] == 0)
                data->correction_factor[data->bucket] = RESOLUTION * DECAY;

        /* Make sure to round up for half microseconds */
        data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
                                         RESOLUTION * DECAY);

        get_typical_interval(data);

        /*
         * We want to default to C1 (hlt), not to busy polling
         * unless the timer is happening really really soon.
         */
        if (data->expected_us > 5 &&
            !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
            dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

        /*
         * Find the idle state with the lowest power while satisfying
         * our constraints.
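         *
         * Illustrative pass: with predicted_us = 2000, latency_req = 100
         * and multiplier = 10, a state with exit_latency 150 fails the
         * latency check, while one with target_residency 500 and
         * exit_latency 50 passes all three tests below
         * (500 <= 2000, 50 <= 100, 50 * 10 <= 2000).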
319 | */ | |
46bcfad7 DD |
320 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { |
321 | struct cpuidle_state *s = &drv->states[i]; | |
dc7fd275 | 322 | struct cpuidle_state_usage *su = &dev->states_usage[i]; |
4f86d3a8 | 323 | |
cbc9ef02 | 324 | if (s->disabled || su->disable) |
3a53396b | 325 | continue; |
14851912 | 326 | if (s->target_residency > data->predicted_us) |
71abbbf8 | 327 | continue; |
a2bd9202 | 328 | if (s->exit_latency > latency_req) |
71abbbf8 | 329 | continue; |
69d25870 | 330 | if (s->exit_latency * multiplier > data->predicted_us) |
71abbbf8 AL |
331 | continue; |
332 | ||
8aef33a7 DL |
333 | data->last_state_idx = i; |
334 | data->exit_us = s->exit_latency; | |
4f86d3a8 LB |
335 | } |
336 | ||
69d25870 | 337 | return data->last_state_idx; |
4f86d3a8 LB |
338 | } |
339 | ||
/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of the actually entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        data->last_state_idx = index;
        if (index >= 0)
                data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int last_idx = data->last_state_idx;
        unsigned int last_idle_us = cpuidle_get_last_residency(dev);
        struct cpuidle_state *target = &drv->states[last_idx];
        unsigned int measured_us;
        u64 new_factor;

        /*
         * Ugh, this idle state doesn't support residency measurements, so we
         * are basically lost in the dark. As a compromise, assume we slept
         * for the whole expected time.
         */
        if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
                last_idle_us = data->expected_us;

        measured_us = last_idle_us;

        /*
         * We correct for the exit latency; we are assuming here that the
         * exit latency happens after the event that we're interested in.
         */
        if (measured_us > data->exit_us)
                measured_us -= data->exit_us;

        /* update our correction ratio */
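        /*
         * Illustrative numbers: starting from a unity factor of 8192
         * (RESOLUTION * DECAY) and sleeping only half the expected time,
         * the update below yields 8192 * 7/8 + 1024 / 2 = 7680, i.e. an
         * effective ratio of 7680/8192 ~= 0.94 after one observation.
         */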
        new_factor = data->correction_factor[data->bucket]
                        * (DECAY - 1) / DECAY;

        if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
                new_factor += RESOLUTION * measured_us / data->expected_us;
        else
                /*
                 * we were idle so long that we count it as a perfect
                 * prediction
                 */
                new_factor += RESOLUTION;

        /*
         * We don't want 0 as factor; we always want at least
         * a tiny bit of estimated time.
         */
        if (new_factor == 0)
                new_factor = 1;

        data->correction_factor[data->bucket] = new_factor;

        /* update the repeating-pattern data */
        data->intervals[data->interval_ptr++] = last_idle_us;
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev)
{
        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

        memset(data, 0, sizeof(struct menu_device));

        return 0;
}

static struct cpuidle_governor menu_governor = {
        .name =         "menu",
        .rating =       20,
        .enable =       menu_enable_device,
        .select =       menu_select,
        .reflect =      menu_reflect,
        .owner =        THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
        return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);