// SPDX-License-Identifier: GPL-2.0-only
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 */
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>

#include "gov.h"

#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING (50000 * NSEC_PER_USEC)
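
/*
 * Note on units (derived from the code below, not upstream text): the
 * correction factors are fixed-point values scaled by RESOLUTION * DECAY
 * (8192 with the values above), so a factor of RESOLUTION * DECAY
 * represents unity, i.e. a measured idle time equal to the predicted one.
 */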

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from PM QoS infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * one that is based on historic behavior. For example, if in the past the
 * actual duration always was 50% of the next timer tick, the correction
 * factor will be 0.5.
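 *
 * For example (hypothetical numbers, for illustration only): with a next
 * timer event 1000 us away and a learned correction factor of 0.5, the
 * corrected prediction is 1000 us * 0.5 = 500 us, so only idle states
 * whose target residency is at most 500 us remain candidates.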
 *
 * menu uses a running average for this correction factor, but it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding
 * or not. (As a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, that
 * gets indexed based on the magnitude of the expected duration as well as
 * the "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where the "next timer" is a completely unusable
 * predictor: those cases where the interval is fixed, for example due to
 * hardware interrupt mitigation, but also due to fixed transfer rate
 * devices such as mice.
 * For this, we use a different predictor: We track the duration of the last
 * 8 intervals and if the standard deviation of these 8 intervals is below
 * a threshold value, we use the average of these intervals as prediction.
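 *
 * For example (hypothetical numbers): a device raising an interrupt every
 * 8 ms yields eight samples close to 8000 us; their standard deviation is
 * tiny compared to the 8000 us average, so 8000 us is used as the
 * prediction even when the next timer event is much further away.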
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most
 * sysadmins, and in addition, less performance has a power price of its
 * own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * state.
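 *
 * For example (hypothetical numbers): with two tasks waiting for IO on
 * this CPU the multiplier is 1 + 10 * 2 = 21, so a predicted idle time
 * of 1050 us caps the acceptable exit latency at 1050 us / 21 = 50 us
 * (unless the PM QoS limit is lower still).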
 *
 * Currently there is only one value determining the factor:
 * 10 points are added for each process that is waiting for IO on this CPU.
 * (This value was experimentally determined.)
 *
 * Utilization is no longer a factor as it was shown that it never
 * contributed significantly to the performance multiplier in the first
 * place.
 */

struct menu_device {
	int		needs_update;
	int		tick_wakeup;

	u64		next_timer_ns;
	unsigned int	bucket;
	unsigned int	correction_factor[BUCKETS];
	unsigned int	intervals[INTERVALS];
	int		interval_ptr;
};

static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO
	 * pending, one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration_ns < 10ULL * NSEC_PER_USEC)
		return bucket;
	if (duration_ns < 100ULL * NSEC_PER_USEC)
		return bucket + 1;
	if (duration_ns < 1000ULL * NSEC_PER_USEC)
		return bucket + 2;
	if (duration_ns < 10000ULL * NSEC_PER_USEC)
		return bucket + 3;
	if (duration_ns < 100000ULL * NSEC_PER_USEC)
		return bucket + 4;
	return bucket + 5;
}
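
/*
 * For example (hypothetical input): an expected duration of 300 us with
 * IO outstanding starts at bucket BUCKETS/2 == 6 and lands in bucket
 * 6 + 2 == 8, while the same duration without IO pending lands in
 * bucket 2.
 */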

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned int nr_iowaiters)
{
	/* for IO wait tasks (per cpu!) we add 10x each */
	return 1 + 10 * nr_iowaiters;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int min, max, thresh, avg;
	uint64_t sum, variance;

	thresh = INT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	min = UINT_MAX;
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;

			if (value < min)
				min = value;
		}
	}

	if (!max)
		return UINT_MAX;

	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when the standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or the standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}
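
	/*
	 * Illustrative numbers for the check above (hypothetical): eight
	 * samples around 8000 us with a standard deviation of 100 us give
	 * variance = 10000 us^2; avg^2 = 64000000 exceeds 36 * variance =
	 * 360000, so the 8000 us average is accepted as the typical
	 * interval.
	 */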

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
	u64 predicted_ns;
	u64 interactivity_req;
	unsigned int nr_iowaiters;
	ktime_t delta, delta_tick;
	int i, idx;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	nr_iowaiters = nr_iowait_cpu(dev->cpu);

	/* Find the shortest expected idle interval. */
	predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
	if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
		unsigned int timer_us;

		/* Determine the time till the closest timer. */
		delta = tick_nohz_get_sleep_length(&delta_tick);
		if (unlikely(delta < 0)) {
			delta = 0;
			delta_tick = 0;
		}

		data->next_timer_ns = delta;
		data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);

		/* Round up the result for half microseconds. */
		timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
					data->next_timer_ns *
						data->correction_factor[data->bucket],
				   RESOLUTION * DECAY * NSEC_PER_USEC);
		/* Use the lowest expected idle interval to pick the idle state. */
		predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns);
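
		/*
		 * Rounding example (hypothetical numbers): with DECAY == 8,
		 * next_timer_ns == 1000000 (1 ms) and a correction factor of
		 * 4096 (i.e. 0.5), timer_us = (8192000 / 2 + 1000000 * 4096)
		 * / 8192000 = 500, so the 1 ms timer distance is scaled down
		 * to 500 us.
		 */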
	} else {
		/*
		 * Because the next timer event is not going to be determined
		 * in this case, assume that without the tick the closest timer
		 * will be in the distant future and that the closest tick will
		 * occur after 1/2 of the tick period.
		 */
		data->next_timer_ns = KTIME_MAX;
		delta_tick = TICK_NSEC / 2;
		data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
	}

	if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
	    ((data->next_timer_ns < drv->states[1].target_residency_ns ||
	      latency_req < drv->states[1].exit_latency_ns) &&
	     !dev->states_usage[0].disable)) {
		/*
		 * In this case state[0] will be used no matter what, so return
		 * it right away and keep the tick running if state[0] is a
		 * polling one.
		 */
		*stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
		return 0;
	}

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it. In that case say we might mispredict and use
		 * the known time till the closest timer event for the idle
		 * state selection.
		 */
		if (predicted_ns < TICK_NSEC)
			predicted_ns = data->next_timer_ns;
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = div64_u64(predicted_ns,
					      performance_multiplier(nr_iowaiters));
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = 0; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable)
			continue;

		if (idx == -1)
			idx = i; /* first enabled state */

		if (s->target_residency_ns > predicted_ns) {
			/*
			 * Use a physical idle state, not busy polling, unless
			 * a timer is going to trigger soon enough.
			 */
			if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
			    s->exit_latency_ns <= latency_req &&
			    s->target_residency_ns <= data->next_timer_ns) {
				predicted_ns = s->target_residency_ns;
				idx = i;
				break;
			}
			if (predicted_ns < TICK_NSEC)
				break;

			if (!tick_nohz_tick_stopped()) {
				/*
				 * If the state selected so far is shallow,
				 * waking up early won't hurt, so retain the
				 * tick in that case and let the governor run
				 * again in the next iteration of the loop.
				 */
				predicted_ns = drv->states[idx].target_residency_ns;
				break;
			}

			/*
			 * If the state selected so far is shallow and this
			 * state's target residency matches the time till the
			 * closest timer event, select this one to avoid getting
			 * stuck in the shallow one for too long.
			 */
			if (drv->states[idx].target_residency_ns < TICK_NSEC &&
			    s->target_residency_ns <= delta_tick)
				idx = i;

			return idx;
		}
		if (s->exit_latency_ns > latency_req)
			break;

		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	     predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
		*stop_tick = false;

		if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency_ns <= delta_tick)
					break;
			}
		}
	}

	return idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	dev->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = dev->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	u64 measured_ns;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_ns = 9 * MAX_INTERESTING / 10;
	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
		   dev->poll_time_limit) {
		/*
		 * The CPU exited the "polling" state due to a time limit, so
		 * the idle duration prediction leading to the selection of
		 * that state was inaccurate. If a better prediction had been
		 * made, the CPU might have been woken up from idle by the next
		 * timer. Assume that to be the case.
		 */
		measured_ns = data->next_timer_ns;
	} else {
		/* measured value */
		measured_ns = dev->last_residency_ns;

		/* Deduct exit latency */
		if (measured_ns > 2 * target->exit_latency_ns)
			measured_ns -= target->exit_latency_ns;
		else
			measured_ns /= 2;
	}

	/* Make sure our coefficients do not exceed unity */
	if (measured_ns > data->next_timer_ns)
		measured_ns = data->next_timer_ns;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
		new_factor += div64_u64(RESOLUTION * measured_ns,
					data->next_timer_ns);
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_ns values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;
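
	/*
	 * Worked example of the update above (hypothetical numbers): with
	 * DECAY == 8 and a factor at unity (RESOLUTION * DECAY == 8192), a
	 * sleep measured at half of the predicted timer distance contributes
	 * RESOLUTION / 2 == 512, moving the factor from 8192 to
	 * 8192 - 1024 + 512 == 7680, i.e. from 1.0 towards 0.94.
	 */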

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * If the correction factor is 0 (e.g. first-time init or CPU hotplug
	 * etc.), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor and registers the cpuidle framework
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);