/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */

#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
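
/*
 * Worked example (illustration only): with the defaults above,
 * (MAX_INTERESTING - 1) * RESOLUTION = 49999 * 1024 = 51,198,976,
 * which is far below UINT_MAX (4,294,967,295), so the 32 bit
 * multiplication cannot overflow.
 */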

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor based on historic behavior
 * is applied to the estimate. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor
 * will be 0.5.
 *
 * menu uses a running average for this correction factor; however, it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time, the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding or
 * not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that
 * gets indexed based on the magnitude of the expected duration as well as
 * the "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 *
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have;
 * a value of 5 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 */
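
/*
 * A minimal sketch (illustration only, never compiled) of how the pieces
 * described above combine into a prediction. The helper name
 * example_predicted_us is hypothetical; the real computation lives in
 * menu_select() below.
 */
#if 0
static unsigned int example_predicted_us(unsigned int next_timer_us,
					 unsigned int correction_factor)
{
	/* Scale the timer-based estimate by the learned ratio, rounding. */
	return (unsigned int)DIV_ROUND_CLOSEST_ULL((u64)next_timer_us *
						   correction_factor,
						   RESOLUTION * DECAY);
}
#endif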

struct menu_device {
	int		last_state_idx;
	int		needs_update;
	int		tick_wakeup;

	unsigned int	next_timer_us;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}
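
/*
 * Example (illustrative): a load average of 1.50 is decoded as
 * LOAD_INT = 1 and LOAD_FRAC = 50, so get_loadavg() returns
 * 1 * 10 + 50 / 10 = 15.
 */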

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending, one
	 * without. This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
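
/*
 * Example (illustrative): with outstanding IO (nr_iowaiters > 0) and an
 * expected duration of 250 us, the bucket is BUCKETS/2 + 2 = 8, since
 * 100 <= 250 < 1000.
 */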

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */
	mult += 2 * get_loadavg(load);

	/* for IO wait tasks (per cpu!) we add 10 points each */
	mult += 10 * nr_iowaiters;

	return mult;
}
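
/*
 * Example (illustrative): with a per-cpu load average of 1.50
 * (get_loadavg() == 15) and two tasks in iowait on this CPU, the
 * multiplier is 1 + 2 * 15 + 10 * 2 = 51, so a state is only a
 * candidate if its exit latency fits 51 times into the predicted
 * idle duration.
 */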

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data,
					 unsigned int predicted_us)
{
	int i, divisor;
	unsigned int min, max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	min = UINT_MAX;
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;

			if (value < min)
				min = value;
		}
	}

	/*
	 * If the result of the computation is going to be discarded anyway,
	 * avoid the computation altogether.
	 */
	if (min >= predicted_us)
		return UINT_MAX;

	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}
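
/*
 * Example (illustrative): eight recorded intervals in the 95..105 us
 * range give avg ~= 100 us and a variance well under 400 us^2
 * (stddev <= 20 us), so 100 us is used as the prediction regardless of
 * the next timer event.
 */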

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int latency_req = cpuidle_governor_latency_req(dev->cpu);
	int i;
	int idx;
	unsigned int interactivity_req;
	unsigned int predicted_us;
	unsigned long nr_iowaiters, cpu_load;
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
	    ((data->next_timer_us < drv->states[1].target_residency ||
	      latency_req < drv->states[1].exit_latency) &&
	     !drv->states[0].disabled && !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running.
		 */
		*stop_tick = false;
		return 0;
	}

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					     data->correction_factor[data->bucket],
					     RESOLUTION * DECAY);

	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it. In that case say we might mispredict and use
		 * the known time till the closest timer event for the idle
		 * state selection.
		 */
		if (predicted_us < TICK_USEC)
			predicted_us = ktime_to_us(delta_next);
	}

	/*
	 * Use the performance multiplier and the user-configurable
	 * latency_req to determine the maximum exit latency.
	 */
	interactivity_req = predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
	if (latency_req > interactivity_req)
		latency_req = interactivity_req;
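
	/*
	 * Example (illustrative): with predicted_us = 1000 and a
	 * performance multiplier of 51, interactivity_req is 19 us, so
	 * states with a larger exit latency are ruled out here even if
	 * the PM QoS limit would allow them.
	 */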

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->target_residency > predicted_us) {
			/*
			 * Use a physical idle state, not busy polling, unless
			 * a timer is going to trigger soon enough.
			 */
			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
			    s->exit_latency <= latency_req &&
			    s->target_residency <= data->next_timer_us) {
				predicted_us = s->target_residency;
				idx = i;
				break;
			}
			if (predicted_us < TICK_USEC)
				break;

			if (!tick_nohz_tick_stopped()) {
				/*
				 * If the state selected so far is shallow,
				 * waking up early won't hurt, so retain the
				 * tick in that case and let the governor run
				 * again in the next iteration of the loop.
				 */
				predicted_us = drv->states[idx].target_residency;
				break;
			}

			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
			 * closest timer event, select this one to avoid getting
			 * stuck in the shallow one for too long.
			 */
			if (drv->states[idx].target_residency < TICK_USEC &&
			    s->target_residency <= ktime_to_us(delta_next))
				idx = i;

			break;
		}
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			predicted_us = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use the measured value anyway if it is short, and truncate
	 * it to the whole expected time if it is long.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of that
		 * state was inaccurate. If a better prediction had been made,
		 * the CPU might have been woken up from idle by the next timer.
		 * Assume that to be the case.
		 */
		measured_us = data->next_timer_us;
	} else {
		/* measured value */
		measured_us = dev->last_residency;

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;
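
	/*
	 * Example (illustrative): with a stored factor of 8192 (unity),
	 * next_timer_us = 1000 and measured_us = 500, the update yields
	 * 8192 - 8192/8 + 1024 * 500 / 1000 = 7168 + 512 = 7680, i.e.
	 * future predictions in this bucket shrink to 7680/8192 ~= 94%
	 * of the timer-based estimate.
	 */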

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu
	 * hotplug etc), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}
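
/*
 * Note: RESOLUTION * DECAY = 8192 encodes a ratio of 1.0, so a freshly
 * enabled CPU initially trusts the "next timer event" estimate as-is,
 * until menu_update() has learned a better correction factor.
 */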

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);