/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

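/*
 * cpuset_inc()/cpuset_dec() enforce the ordering described above: the
 * pre_enable key read by read_mems_allowed_begin() is switched on first and
 * switched off last. The _cpuslocked variants assume the caller already
 * holds the CPU hotplug read lock (cpus_read_lock()).
 */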
static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
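
/*
 * Typical use (a sketch modelled on the page allocator's zonelist walk, not
 * mandated by this header): callers that already have extra conditions to
 * check test cpusets_enabled() themselves and use the __ variant, so the
 * common cpusets-disabled case stays a single static branch, e.g.
 *
 *	if (cpusets_enabled() && !__cpuset_zone_allowed(zone, gfp_mask))
 *		continue;
 */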

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

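/*
 * cpuset_memory_pressure_bump() is a macro so that the common case, with the
 * root cpuset's memory_pressure_enabled control left off, costs callers only
 * an inlined load and branch before skipping the call.
 */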
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
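
/*
 * The usual caller pattern (a sketch; the names below are illustrative and
 * not part of this header):
 *
 *	unsigned int cpuset_mems_cookie;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_the_allocation();
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */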
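
/*
 * The write side runs with irqs disabled: a read_mems_allowed_begin()/
 * read_mems_allowed_retry() loop entered from an interrupt on this CPU would
 * otherwise spin forever on the odd (in-progress) sequence count.
 */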
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_read_lock(void) { }
static inline void cpuset_read_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */