/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/topology.h>
#include <linux/types.h>

/**
 * struct em_perf_state - Performance state of a performance domain
 * @performance: CPU performance (capacity) at a given frequency
 * @frequency: The frequency in kHz, for consistency with CPUFreq
 * @power: The power consumed at this level (by 1 CPU or by a registered
 *         device). It can be a total power: static and dynamic.
 * @cost: The cost coefficient associated with this level, used during
 *        energy calculation. Equal to: power * max_frequency / frequency
 * @flags: see "em_perf_state flags" description below.
 */
struct em_perf_state {
        unsigned long performance;
        unsigned long frequency;
        unsigned long power;
        unsigned long cost;
        unsigned long flags;
};

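/*
 * Worked example of the @cost coefficient above (the numbers are hypothetical
 * and only illustrate the formula): for a domain whose highest frequency is
 * 2000000 kHz, a state with frequency = 1000000 kHz and power = 150000 uW
 * gets:
 *
 *   cost = power * max_frequency / frequency
 *        = 150000 * 2000000 / 1000000
 *        = 300000
 *
 * A state whose cost is higher than that of a faster state is the kind of
 * state flagged as inefficient below.
 */
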
/*
 * em_perf_state flags:
 *
 * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient. There is,
 * in this em_perf_domain, another performance state with a higher frequency
 * but a lower or equal power cost. Such inefficient states are ignored when
 * using the em_pd_get_efficient_*() functions.
 */
#define EM_PERF_STATE_INEFFICIENT BIT(0)

/**
 * struct em_perf_table - Performance states table
 * @rcu: RCU used for safe access and destruction
 * @kref: Reference counter to track the users
 * @state: List of performance states, in ascending order
 */
struct em_perf_table {
        struct rcu_head rcu;
        struct kref kref;
        struct em_perf_state state[];
};

/**
 * struct em_perf_domain - Performance domain
 * @em_table: Pointer to the runtime modifiable em_perf_table
 * @nr_perf_states: Number of performance states
 * @flags: See "em_perf_domain flags"
 * @cpus: Cpumask covering the CPUs of the domain. It's here
 *        for performance reasons to avoid potential cache
 *        misses during energy calculations in the scheduler
 *        and simplifies allocating/freeing that memory region.
 *
 * In the case of a CPU device, a "performance domain" represents a group of
 * CPUs whose performance is scaled together. All CPUs of a performance domain
 * must have the same micro-architecture. Performance domains often have
 * a 1-to-1 mapping with CPUFreq policies. For other devices the @cpus
 * field is unused.
 */
struct em_perf_domain {
        struct em_perf_table __rcu *em_table;
        int nr_perf_states;
        unsigned long flags;
        unsigned long cpus[];
};

/*
 * em_perf_domain flags:
 *
 * EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts or some
 * other scale.
 *
 * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
 * energy consumption.
 *
 * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be
 * created by a platform that lacks real power information.
 */
#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)

#define em_span_cpus(em) (to_cpumask((em)->cpus))
#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)

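/*
 * A minimal usage sketch (hypothetical caller code): iterate over the CPUs
 * of a performance domain and skip domains with artificial power values.
 *
 *      struct em_perf_domain *pd = em_cpu_get(cpu);
 *      int i;
 *
 *      if (pd && !em_is_artificial(pd)) {
 *              for_each_cpu(i, em_span_cpus(pd))
 *                      pr_debug("CPU%d is in this perf domain\n", i);
 *      }
 */
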
#ifdef CONFIG_ENERGY_MODEL
/*
 * The max power value in micro-Watts. The limit of 64 Watts is set as
 * a safety net to not overflow multiplications on 32bit platforms. The
 * 32bit value limit for the total Perf Domain power implies a limit of
 * at most 64 CPUs in such a domain.
 */
#define EM_MAX_POWER (64000000) /* 64 Watts */

/*
 * To avoid a possible energy estimation overflow on 32bit machines, add
 * a limit to the number of CPUs in the Perf. Domain. 64bit machines are
 * safe, thus a large number is used there.
 */
#ifdef CONFIG_64BIT
#define EM_MAX_NUM_CPUS 4096
#else
#define EM_MAX_NUM_CPUS 16
#endif

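/*
 * Worked check of the bound described above: 64 CPUs, each capped at
 * EM_MAX_POWER (64000000 uW), sum to 64 * 64000000 = 4096000000 uW, which
 * still fits in an unsigned 32bit value (max 4294967295).
 */
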
struct em_data_callback {
        /**
         * active_power() - Provide power at the next performance state of
         *                  a device
         * @dev   : Device for which we do this operation (can be a CPU)
         * @power : Active power at the performance state
         *          (modified)
         * @freq  : Frequency at the performance state in kHz
         *          (modified)
         *
         * active_power() must find the lowest performance state of 'dev' above
         * 'freq' and update 'power' and 'freq' to the matching active power
         * and frequency.
         *
         * In the case of CPUs, the power is that of a single CPU in the
         * domain, expressed in micro-Watts or in an abstract scale. It is
         * expected to fit in the [0, EM_MAX_POWER] range.
         *
         * Return 0 on success.
         */
        int (*active_power)(struct device *dev, unsigned long *power,
                            unsigned long *freq);

        /**
         * get_cost() - Provide the cost at the given performance state of
         *              a device
         * @dev  : Device for which we do this operation (can be a CPU)
         * @freq : Frequency at the performance state in kHz
         * @cost : The cost value for the performance state
         *         (modified)
         *
         * In the case of CPUs, the cost is that of a single CPU in the
         * domain. It is expected to fit in the [0, EM_MAX_POWER] range due
         * to internal usage in EAS calculations.
         *
         * Return 0 on success, or an appropriate error value in case of
         * failure.
         */
        int (*get_cost)(struct device *dev, unsigned long freq,
                        unsigned long *cost);
};
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb)      \
        { .active_power = _active_power_cb,             \
          .get_cost = _cost_cb }
#define EM_DATA_CB(_active_power_cb) \
                EM_ADV_DATA_CB(_active_power_cb, NULL)

struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
int em_dev_update_perf_domain(struct device *dev,
                              struct em_perf_table __rcu *new_table);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
                                struct em_data_callback *cb, cpumask_t *span,
                                bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd);
void em_table_free(struct em_perf_table __rcu *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
                         int nr_states);

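/*
 * A minimal registration sketch (hypothetical driver code; the callback name,
 * the values it reports and the number of states are made up, and a real
 * active_power() would look them up, e.g. from an OPP table, for the lowest
 * state at or above *freq):
 *
 *      static int my_active_power(struct device *dev, unsigned long *power,
 *                                 unsigned long *freq)
 *      {
 *              *freq = 1000000;
 *              *power = 150000;
 *              return 0;
 *      }
 *
 *      static struct em_data_callback em_cb = EM_DATA_CB(my_active_power);
 *
 *      static int my_probe(struct device *dev)
 *      {
 *              return em_dev_register_perf_domain(dev, 3, &em_cb,
 *                                                 NULL, true);
 *      }
 *
 * Passing NULL for the cpumask is meant for a non-CPU device here; 'true'
 * requests EM_PERF_DOMAIN_MICROWATTS, i.e. power values in micro-Watts.
 */
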
/**
 * em_pd_get_efficient_state() - Get an efficient performance state from the EM
 * @table: List of performance states, in ascending order
 * @nr_perf_states: Number of performance states
 * @max_util: Max utilization to map with the EM
 * @pd_flags: Performance Domain flags
 *
 * It is called from the scheduler code quite frequently and as a consequence
 * does not implement any checks.
 *
 * Return: An efficient performance state id, high enough to meet @max_util
 * requirement.
 */
static inline int
em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states,
                          unsigned long max_util, unsigned long pd_flags)
{
        struct em_perf_state *ps;
        int i;

        for (i = 0; i < nr_perf_states; i++) {
                ps = &table[i];
                if (ps->performance >= max_util) {
                        if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
                            ps->flags & EM_PERF_STATE_INEFFICIENT)
                                continue;
                        return i;
                }
        }

        return nr_perf_states - 1;
}

/**
 * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
 *              performance domain
 * @pd : performance domain for which energy has to be estimated
 * @max_util : highest utilization among CPUs of the domain
 * @sum_util : sum of the utilization of all CPUs in the domain
 * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
 *                    might reflect reduced frequency (due to thermal)
 *
 * This function must be used only for CPU devices. There is no validation
 * of whether the EM is of CPU type and has a cpumask allocated. It is called
 * from the scheduler code quite frequently and that is why there are no
 * checks.
 *
 * Return: the sum of the energy consumed by the CPUs of the domain assuming
 * a capacity state satisfying the max utilization of the domain.
 */
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
                                unsigned long max_util, unsigned long sum_util,
                                unsigned long allowed_cpu_cap)
{
        struct em_perf_table *em_table;
        struct em_perf_state *ps;
        int i;

#ifdef CONFIG_SCHED_DEBUG
        WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
#endif

        if (!sum_util)
                return 0;

        /*
         * In order to predict the performance state, map the utilization of
         * the most utilized CPU of the performance domain to a requested
         * performance, like schedutil does. Also take into account that the
         * real performance might be set lower (due to thermal capping). Thus,
         * clamp max utilization to the allowed CPU capacity before calculating
         * effective performance.
         */
        max_util = min(max_util, allowed_cpu_cap);

        /*
         * Find the lowest performance state of the Energy Model above the
         * requested performance.
         */
        em_table = rcu_dereference(pd->em_table);
        i = em_pd_get_efficient_state(em_table->state, pd->nr_perf_states,
                                      max_util, pd->flags);
        ps = &em_table->state[i];

        /*
         * The performance (capacity) of a CPU in the domain at the performance
         * state (ps) can be computed as:
         *
         *                     ps->freq * scale_cpu
         *   ps->performance = --------------------                  (1)
         *                         cpu_max_freq
         *
         * So, ignoring the costs of idle states (which are not available in
         * the EM), the energy consumed by this CPU at that performance state
         * is estimated as:
         *
         *             ps->power * cpu_util
         *   cpu_nrg = --------------------                          (2)
         *               ps->performance
         *
         * since 'cpu_util / ps->performance' represents its percentage of busy
         * time.
         *
         *   NOTE: Although the result of this computation actually is in
         *         units of power, it can be manipulated as an energy value
         *         over a scheduling period, since it is assumed to be
         *         constant during that interval.
         *
         * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
         * of two terms:
         *
         *             ps->power * cpu_max_freq
         *   cpu_nrg = ------------------------ * cpu_util           (3)
         *               ps->freq * scale_cpu
         *
         * The first term is static, and is stored in the em_perf_state struct
         * as 'ps->cost'.
         *
         * Since all CPUs of the domain have the same micro-architecture, they
         * share the same 'ps->cost', and the same CPU capacity. Hence, the
         * total energy of the domain (which is the simple sum of the energy of
         * all of its CPUs) can be factorized as:
         *
         *   pd_nrg = ps->cost * \Sum cpu_util                       (4)
         */
        return ps->cost * sum_util;
}

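/*
 * Worked example of the estimate above (hypothetical values): for a 4-CPU
 * domain whose selected state has ps->cost = 300000 and whose CPUs have
 * utilizations 300, 150, 100 and 50:
 *
 *   pd_nrg = ps->cost * sum_util = 300000 * (300 + 150 + 100 + 50)
 *          = 300000 * 600 = 180000000
 *
 * The absolute value is not meaningful on its own; EAS only compares such
 * estimates between candidate task placements.
 */
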
/**
 * em_pd_nr_perf_states() - Get the number of performance states of a perf.
 *                          domain
 * @pd : performance domain for which this must be done
 *
 * Return: the number of performance states in the performance domain table
 */
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
        return pd->nr_perf_states;
}

/**
 * em_perf_state_from_pd() - Get the performance states table of perf.
 *                           domain
 * @pd : performance domain for which this must be done
 *
 * To use this function, rcu_read_lock() must be held. After use of the
 * performance states table is finished, rcu_read_unlock() should be called.
 *
 * Return: the pointer to performance states table of the performance domain
 */
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
        return rcu_dereference(pd->em_table)->state;
}

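/*
 * A minimal access sketch (hypothetical caller code) following the locking
 * rule documented above; the table returned by em_perf_state_from_pd() must
 * only be used under rcu_read_lock():
 *
 *      struct em_perf_state *table;
 *      unsigned long max_freq;
 *
 *      rcu_read_lock();
 *      table = em_perf_state_from_pd(pd);
 *      max_freq = table[pd->nr_perf_states - 1].frequency;
 *      rcu_read_unlock();
 */
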
#else
struct em_data_callback {};
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)

static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
                                struct em_data_callback *cb, cpumask_t *span,
                                bool microwatts)
{
        return -EINVAL;
}
static inline void em_dev_unregister_perf_domain(struct device *dev)
{
}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
        return NULL;
}
static inline struct em_perf_domain *em_pd_get(struct device *dev)
{
        return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
                        unsigned long max_util, unsigned long sum_util,
                        unsigned long allowed_cpu_cap)
{
        return 0;
}
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
        return 0;
}
static inline
struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd)
{
        return NULL;
}
static inline void em_table_free(struct em_perf_table __rcu *table) {}
static inline
int em_dev_update_perf_domain(struct device *dev,
                              struct em_perf_table __rcu *new_table)
{
        return -EINVAL;
}
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
        return NULL;
}
static inline
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
                         int nr_states)
{
        return -EINVAL;
}
#endif

#endif