Commit | Line | Data |
---|---|---|
105ab3d8 IM |
1 | #ifndef _LINUX_SCHED_TOPOLOGY_H |
2 | #define _LINUX_SCHED_TOPOLOGY_H | |
3 | ||
4c822698 IM |
4 | #include <linux/sched/idle.h> |
5 | ||
a60b9eda IM |
/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/*
 * Bits for sched_domain::flags; each bit enables one balancing behaviour
 * or describes one property of the domain it is set on.
 */
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY	0x0040	/* Groups have different max cpu capacities */
#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu capacity */
#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
#define SD_NUMA			0x4000	/* cross-node balancing */

/*
 * Increase resolution of cpu_capacity calculations
 * (fixed-point scale; SCHED_CAPACITY_SCALE == "one full CPU" of capacity).
 */
#define SCHED_CAPACITY_SHIFT	SCHED_FIXEDPOINT_SHIFT
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
32 | ||
#ifdef CONFIG_SCHED_SMT
/*
 * Domain flags for the SMT (hardware thread sibling) topology level:
 * siblings share both CPU capacity and package resources.
 */
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif
39 | ||
#ifdef CONFIG_SCHED_MC
/*
 * Domain flags for the multi-core (MC) topology level: cores in the
 * same package share package resources but not CPU capacity.
 */
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif
46 | ||
#ifdef CONFIG_NUMA
/* Domain flags for NUMA topology levels: enable cross-node balancing. */
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif
53 | ||
/* Arch hook: relative priority of @cpu for SD_ASYM_PACKING placement. */
extern int arch_asym_cpu_priority(int cpu);

/*
 * Per-partition attributes passed to partition_sched_domains().
 * relax_domain_level of -1 (see SD_ATTR_INIT) is the "default" setting;
 * NOTE(review): exact level semantics live in kernel/sched/ — confirm there.
 */
struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

/* Deepest sched_domain level built on this system. */
extern int sched_domain_level_max;
65 | ||
struct sched_group;

/*
 * State shared by all sched_domain instances of one level that span the
 * same CPUs (see sched_domain::shared).  Reference counted; field
 * meanings below are inferred from the names — confirm in kernel/sched/.
 */
struct sched_domain_shared {
	atomic_t	ref;		/* users of this shared state */
	atomic_t	nr_busy_cpus;	/* presumably: non-idle CPUs in the span */
	int		has_idle_cores;	/* presumably: hint that an idle core exists */
};
73 | ||
74 | struct sched_domain { | |
75 | /* These fields must be setup */ | |
76 | struct sched_domain *parent; /* top domain must be null terminated */ | |
77 | struct sched_domain *child; /* bottom domain must be null terminated */ | |
78 | struct sched_group *groups; /* the balancing groups of the domain */ | |
79 | unsigned long min_interval; /* Minimum balance interval ms */ | |
80 | unsigned long max_interval; /* Maximum balance interval ms */ | |
81 | unsigned int busy_factor; /* less balancing by factor if busy */ | |
82 | unsigned int imbalance_pct; /* No balance until over watermark */ | |
83 | unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ | |
84 | unsigned int busy_idx; | |
85 | unsigned int idle_idx; | |
86 | unsigned int newidle_idx; | |
87 | unsigned int wake_idx; | |
88 | unsigned int forkexec_idx; | |
89 | unsigned int smt_gain; | |
90 | ||
91 | int nohz_idle; /* NOHZ IDLE status */ | |
92 | int flags; /* See SD_* */ | |
93 | int level; | |
94 | ||
95 | /* Runtime fields. */ | |
96 | unsigned long last_balance; /* init to jiffies. units in jiffies */ | |
97 | unsigned int balance_interval; /* initialise to 1. units in ms. */ | |
98 | unsigned int nr_balance_failed; /* initialise to 0 */ | |
99 | ||
100 | /* idle_balance() stats */ | |
101 | u64 max_newidle_lb_cost; | |
102 | unsigned long next_decay_max_lb_cost; | |
103 | ||
104 | u64 avg_scan_cost; /* select_idle_sibling */ | |
105 | ||
106 | #ifdef CONFIG_SCHEDSTATS | |
107 | /* load_balance() stats */ | |
108 | unsigned int lb_count[CPU_MAX_IDLE_TYPES]; | |
109 | unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; | |
110 | unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; | |
111 | unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; | |
112 | unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; | |
113 | unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; | |
114 | unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; | |
115 | unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; | |
116 | ||
117 | /* Active load balancing */ | |
118 | unsigned int alb_count; | |
119 | unsigned int alb_failed; | |
120 | unsigned int alb_pushed; | |
121 | ||
122 | /* SD_BALANCE_EXEC stats */ | |
123 | unsigned int sbe_count; | |
124 | unsigned int sbe_balanced; | |
125 | unsigned int sbe_pushed; | |
126 | ||
127 | /* SD_BALANCE_FORK stats */ | |
128 | unsigned int sbf_count; | |
129 | unsigned int sbf_balanced; | |
130 | unsigned int sbf_pushed; | |
131 | ||
132 | /* try_to_wake_up() stats */ | |
133 | unsigned int ttwu_wake_remote; | |
134 | unsigned int ttwu_move_affine; | |
135 | unsigned int ttwu_move_balance; | |
136 | #endif | |
137 | #ifdef CONFIG_SCHED_DEBUG | |
138 | char *name; | |
139 | #endif | |
140 | union { | |
141 | void *private; /* used during construction */ | |
142 | struct rcu_head rcu; /* used during destruction */ | |
143 | }; | |
144 | struct sched_domain_shared *shared; | |
145 | ||
146 | unsigned int span_weight; | |
147 | /* | |
148 | * Span of all CPUs in this domain. | |
149 | * | |
150 | * NOTE: this field is variable length. (Allocated dynamically | |
151 | * by attaching extra space to the end of the structure, | |
152 | * depending on how many CPUs the kernel has booted up with) | |
153 | */ | |
154 | unsigned long span[0]; | |
155 | }; | |
156 | ||
/* Return @sd's CPU span (the trailing variable-length bitmap) as a cpumask. */
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
161 | ||
/*
 * Rebuild the scheduler's domain partitions: @ndoms_new cpumasks in
 * @doms_new, with per-partition attributes in @dattr_new.
 */
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

/* Whether the two CPUs share a cache (always true on !SMP builds). */
bool cpus_share_cache(int this_cpu, int that_cpu);

/* Per-topology-level callbacks: CPU -> sibling mask, and its SD_* flags. */
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP		0x01	/* level builds overlapping domains (SD_OVERLAP) */
175 | ||
/*
 * Per-CPU pointer bundles for one topology level; NOTE(review): appears
 * to be working storage used while constructing the domain hierarchy —
 * confirm against kernel/sched/topology.c.
 */
struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_domain_shared **__percpu sds;
	struct sched_group **__percpu sg;
	struct sched_group_capacity **__percpu sgc;
};
182 | ||
/*
 * Describes one level of the machine topology from which a sched_domain
 * level is built; an array of these is installed via set_sched_topology().
 */
struct sched_domain_topology_level {
	sched_domain_mask_f mask;	/* CPUs sharing this level's domain with a given CPU */
	sched_domain_flags_f sd_flags;	/* SD_* flags for domains of this level */
	int		    flags;	/* SDTL_* (e.g. SDTL_OVERLAP) */
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;	/* set via SD_INIT_NAME() */
#endif
};
193 | ||
/* Install an (arch-specific) topology-level table @tl. */
extern void set_sched_topology(struct sched_domain_topology_level *tl);

/* Stringify the level name into ->name on debug builds; no-op otherwise. */
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif
201 | ||
#else /* CONFIG_SMP */

struct sched_domain_attr;

/* UP stub: with a single CPU there are no domains to repartition. */
static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

/* UP stub: a CPU trivially shares a cache with itself. */
static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif /* !CONFIG_SMP */
218 | ||
105ab3d8 | 219 | #endif /* _LINUX_SCHED_TOPOLOGY_H */ |