/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
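
/*
 * Illustrative sketch (not part of the header itself): every SD_FLAG()
 * entry in <linux/sched/sd_flags.h> is expanded twice by the two passes
 * above. Taking the SD_BALANCE_EXEC entry as an example, the generated
 * enums are roughly equivalent to:
 *
 *	enum { __SD_BALANCE_EXEC, ..., __SD_FLAG_CNT };
 *	enum { SD_BALANCE_EXEC = 1 << __SD_BALANCE_EXEC, ... };
 *
 * The first pass assigns each flag a contiguous index; the second turns
 * that index into a distinct bit value, so flags can be OR-ed together.
 */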

struct sd_flag_debug {
	unsigned int meta_flags;
	char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
	return SD_CLUSTER | SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}
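
/*
 * Usage sketch (illustrative only): SD_ATTR_INIT is a compound literal,
 * so a default attribute block can be written as:
 *
 *	struct sched_domain_attr dattr = SD_ATTR_INIT;
 *
 * A relax_domain_level of -1 asks for the system default level rather
 * than naming a specific one.
 */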

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
	int		nr_idle_scan;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;		/* the balancing groups of the domain */
	unsigned long min_interval;		/* Minimum balance interval ms */
	unsigned long max_interval;		/* Maximum balance interval ms */
	unsigned int busy_factor;		/* less balancing by factor if busy */
	unsigned int imbalance_pct;		/* No balance until over watermark */
	unsigned int cache_nice_tries;		/* Leave cache hot tasks for # tries */
	unsigned int imb_numa_nr;		/* Nr running tasks that allows a NUMA imbalance */

	int nohz_idle;				/* NOHZ IDLE status */
	int flags;				/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;		/* init to jiffies. units in jiffies */
	unsigned int balance_interval;		/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed;		/* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long last_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* sched_balance_rq() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_load[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_util[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_task[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance_misfit[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
	char *name;
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
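
/*
 * Usage sketch (illustrative only): the span is a cpumask, so every CPU
 * covered by a domain can be walked with the usual cpumask iterators
 * from <linux/cpumask.h>:
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		do_per_cpu_work(cpu);	(do_per_cpu_work is hypothetical)
 */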

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_resources(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP		0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data	    data;
	char		    *name;
};

extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);


# define SD_INIT_NAME(type)		.name = #type
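
/*
 * Illustrative sketch (not verbatim kernel code): an architecture replaces
 * the default topology by handing set_sched_topology() a NULL-terminated
 * table of levels, each pairing a per-CPU mask callback with an optional
 * per-level flags callback such as cpu_smt_flags() above:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 *
 * "my_topology" is a hypothetical name; the mask helpers shown are the
 * ones the kernel's own default table uses.
 */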

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_equal_capacity(int this_cpu, int that_cpu)
{
	return true;
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

static inline bool cpus_share_resources(int this_cpu, int that_cpu)
{
	return true;
}

static inline void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
{
}

#endif	/* !CONFIG_SMP */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
{
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *       ----------------------------- * SCHED_CAPACITY_SCALE
 *       max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
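
/*
 * Worked example (hypothetical numbers): on an asymmetric system where a
 * big CPU's max_perf is 2048 and a little CPU's max_perf is 1024, the
 * little CPU's capacity comes out as
 *
 *	(1024 / 2048) * SCHED_CAPACITY_SCALE = 512
 *
 * while the big CPU scales to the full SCHED_CAPACITY_SCALE (1024). The
 * generic fallback above simply reports full capacity for every CPU.
 */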

#ifndef arch_scale_hw_pressure
static __always_inline
unsigned long arch_scale_hw_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_update_hw_pressure
static __always_inline
void arch_update_hw_pressure(const struct cpumask *cpus,
			     unsigned long capped_frequency)
{ }
#endif

#ifndef arch_scale_freq_ref
static __always_inline
unsigned int arch_scale_freq_ref(int cpu)
{
	return 0;
}
#endif

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */