/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS
22 | ||
/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;
664eedde MG |
39 | static inline bool cpusets_enabled(void) |
40 | { | |
002f2906 | 41 | return static_branch_unlikely(&cpusets_enabled_key); |
664eedde MG |
42 | } |
43 | ||
664eedde MG |
44 | static inline void cpuset_inc(void) |
45 | { | |
d74b27d6 JL |
46 | static_branch_inc_cpuslocked(&cpusets_pre_enable_key); |
47 | static_branch_inc_cpuslocked(&cpusets_enabled_key); | |
664eedde MG |
48 | } |
49 | ||
50 | static inline void cpuset_dec(void) | |
51 | { | |
d74b27d6 JL |
52 | static_branch_dec_cpuslocked(&cpusets_enabled_key); |
53 | static_branch_dec_cpuslocked(&cpusets_pre_enable_key); | |
664eedde | 54 | } |
202f72d5 | 55 | |
8ca1b5a4 FT |
56 | /* |
57 | * This will get enabled whenever a cpuset configuration is considered | |
58 | * unsupportable in general. E.g. movable only node which cannot satisfy | |
59 | * any non movable allocations (see update_nodemask). Page allocator | |
60 | * needs to make additional checks for those configurations and this | |
61 | * check is meant to guard those checks without any overhead for sane | |
62 | * configurations. | |
63 | */ | |
64 | static inline bool cpusets_insane_config(void) | |
65 | { | |
66 | return static_branch_unlikely(&cpusets_insane_config_key); | |
67 | } | |
68 | ||
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
02a0e53d | 86 | |
002f2906 | 87 | static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) |
202f72d5 | 88 | { |
8e464522 | 89 | return cpuset_node_allowed(zone_to_nid(z), gfp_mask); |
002f2906 VB |
90 | } |
91 | ||
92 | static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) | |
93 | { | |
94 | if (cpusets_enabled()) | |
95 | return __cpuset_zone_allowed(z, gfp_mask); | |
96 | return true; | |
202f72d5 PJ |
97 | } |
98 | ||
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);
825a46af PJ |
117 | |
118 | static inline int cpuset_do_page_mem_spread(void) | |
119 | { | |
2ad654bc | 120 | return task_spread_page(current); |
825a46af PJ |
121 | } |
122 | ||
extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);
75aa1994 | 128 | |
c0ff7453 | 129 | /* |
d26914d1 MG |
130 | * read_mems_allowed_begin is required when making decisions involving |
131 | * mems_allowed such as during page allocation. mems_allowed can be updated in | |
132 | * parallel and depending on the new value an operation can fail potentially | |
133 | * causing process failure. A retry loop with read_mems_allowed_begin and | |
134 | * read_mems_allowed_retry prevents these artificial failures. | |
c0ff7453 | 135 | */ |
d26914d1 | 136 | static inline unsigned int read_mems_allowed_begin(void) |
c0ff7453 | 137 | { |
89affbf5 | 138 | if (!static_branch_unlikely(&cpusets_pre_enable_key)) |
46e700ab MG |
139 | return 0; |
140 | ||
cc9a6c87 | 141 | return read_seqcount_begin(¤t->mems_allowed_seq); |
c0ff7453 MX |
142 | } |
143 | ||
cc9a6c87 | 144 | /* |
d26914d1 MG |
145 | * If this returns true, the operation that took place after |
146 | * read_mems_allowed_begin may have failed artificially due to a concurrent | |
147 | * update of mems_allowed. It is up to the caller to retry the operation if | |
cc9a6c87 MG |
148 | * appropriate. |
149 | */ | |
d26914d1 | 150 | static inline bool read_mems_allowed_retry(unsigned int seq) |
c0ff7453 | 151 | { |
89affbf5 | 152 | if (!static_branch_unlikely(&cpusets_enabled_key)) |
46e700ab MG |
153 | return false; |
154 | ||
d26914d1 | 155 | return read_seqcount_retry(¤t->mems_allowed_seq, seq); |
c0ff7453 MX |
156 | } |
157 | ||
58568d2a MX |
158 | static inline void set_mems_allowed(nodemask_t nodemask) |
159 | { | |
db751fe3 JS |
160 | unsigned long flags; |
161 | ||
c0ff7453 | 162 | task_lock(current); |
db751fe3 | 163 | local_irq_save(flags); |
cc9a6c87 | 164 | write_seqcount_begin(¤t->mems_allowed_seq); |
58568d2a | 165 | current->mems_allowed = nodemask; |
cc9a6c87 | 166 | write_seqcount_end(¤t->mems_allowed_seq); |
db751fe3 | 167 | local_irq_restore(flags); |
c0ff7453 | 168 | task_unlock(current); |
58568d2a MX |
169 | } |
170 | ||
#else /* !CONFIG_CPUSETS */
/* With cpusets compiled out, no cpuset can ever be in use. */
static inline bool cpusets_enabled(void) { return false; }

/* No cpusets means no "insane" (e.g. movable-only) configuration either. */
static inline bool cpusets_insane_config(void) { return false; }
176 | ||
/* No-op initialisation stubs for !CONFIG_CPUSETS builds. */
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }
181 | ||
30e03acd | 182 | static inline void cpuset_update_active_cpus(void) |
3a101d05 TH |
183 | { |
184 | partition_sched_domains(1, NULL, NULL); | |
185 | } | |
186 | ||
/* DL-task accounting and cpuset locking are no-ops without cpusets. */
static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
710da3c8 | 191 | |
/*
 * Without cpusets a task may run on any CPU its architecture allows;
 * report task_cpu_possible_mask() as the allowed set.
 */
static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}
197 | ||
/* No cpuset-imposed mask exists, so there is never a fallback to apply. */
static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}

/* No cpuset partitions means no CPU can be cpuset-isolated. */
static inline bool cpuset_cpu_is_isolated(int cpu)
{
	return false;
}
207 | ||
909d75a3 PJ |
208 | static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) |
209 | { | |
210 | return node_possible_map; | |
211 | } | |
212 | ||
38d7bee9 | 213 | #define cpuset_current_mems_allowed (node_states[N_MEMORY]) |
1da177e4 | 214 | static inline void cpuset_init_current_mems_allowed(void) {} |
1da177e4 | 215 | |
19770b32 | 216 | static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) |
1da177e4 LT |
217 | { |
218 | return 1; | |
219 | } | |
220 | ||
002f2906 | 221 | static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) |
1da177e4 | 222 | { |
002f2906 VB |
223 | return true; |
224 | } | |
225 | ||
226 | static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) | |
227 | { | |
228 | return true; | |
1da177e4 LT |
229 | } |
230 | ||
/* All tasks share the same (unrestricted) nodemask, so they always
 * intersect. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}
238 | ||
/* Nothing cpuset-related to report in /proc task status. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}
243 | ||
/* Memory spreading is a cpuset feature; without it, always report
 * node 0 / disabled. */
static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}
258 | ||
/* No cpusets, hence no rebinding can ever be in progress. */
static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}
263 | ||
e761b772 MK |
264 | static inline void rebuild_sched_domains(void) |
265 | { | |
dfb512ec | 266 | partition_sched_domains(1, NULL, NULL); |
e761b772 MK |
267 | } |
268 | ||
da39da3a | 269 | static inline void cpuset_print_current_mems_allowed(void) |
75aa1994 DR |
270 | { |
271 | } | |
272 | ||
58568d2a MX |
273 | static inline void set_mems_allowed(nodemask_t nodemask) |
274 | { | |
275 | } | |
276 | ||
/* mems_allowed never changes without cpusets: begin() returns a dummy
 * sequence and retry() never asks callers to loop. */
static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}
286 | ||
#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */