/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. Otherwise, if local irqs are disabled
 * around the loop, we can deadlock: retry() would keep comparing the
 * latest value of the mems_allowed seqcount against 0, because begin()
 * would still see cpusets_enabled() as false and return 0. The
 * enabled -> disabled transition should happen in reverse order for the
 * same reasons (we want retry() to stop looking at the real value of
 * mems_allowed.sequence first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
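
/*
 * Example timeline (illustrative note, not part of the upstream header):
 * with the ordering in cpuset_inc()/cpuset_dec() above, a reader racing
 * with enablement can only ever observe
 *
 *	pre_enable=0, enabled=0:  begin() returns 0, retry() returns false
 *	pre_enable=1, enabled=0:  begin() reads the seqcount, retry() false
 *	pre_enable=1, enabled=1:  both sides use the live seqcount
 *
 * and never enabled=1 with pre_enable=0, which is exactly the
 * combination the comment above identifies as a potential deadlock.
 */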

/*
 * This gets enabled whenever a cpuset configuration is considered
 * unsupportable in general, e.g. a movable-only node, which cannot
 * satisfy any non-movable allocations (see update_nodemask). The page
 * allocator needs to make additional checks for those configurations,
 * and this check is meant to guard those checks without adding any
 * overhead for sane configurations.
 */
static inline bool cpusets_insane_config(void)
{
	return static_branch_unlikely(&cpusets_insane_config_key);
}
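
/*
 * Example (illustrative sketch, not part of the upstream header): how an
 * allocator slow path might use the guard above. The function name and
 * warning text are assumptions for illustration.
 */
static inline void example_warn_insane_config(gfp_t gfp_mask)
{
	/* Only complain when the request cannot leave the cpuset's mems. */
	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL))
		pr_warn_once("cpuset: mems_allowed cannot satisfy this allocation\n");
}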

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
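
/*
 * Example (illustrative sketch, not part of the upstream header): a
 * typical caller skips zones the current task's cpuset does not permit,
 * in the style of the page allocator's zonelist walk. The function name
 * is an assumption for illustration.
 */
static inline struct zone *example_first_allowed_zone(struct zonelist *zonelist,
						      gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/* Hardwalled requests must stay within the cpuset's mems. */
		if (cpusets_enabled() && (gfp_mask & __GFP_HARDWALL) &&
		    !__cpuset_zone_allowed(zone, gfp_mask))
			continue;
		return zone;
	}
	return NULL;
}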

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail, potentially causing a spurious process failure. A retry loop
 * with read_mems_allowed_begin and read_mems_allowed_retry prevents
 * these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
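
/*
 * Example (illustrative sketch, not part of the upstream header): the
 * canonical begin/retry pairing around an allocation attempt. The
 * function name is an assumption for illustration.
 */
static inline struct page *example_alloc_respecting_mems(gfp_t gfp,
							 unsigned int order)
{
	struct page *page;
	unsigned int cpuset_mems_cookie;

	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		/* May fail spuriously if mems_allowed changes mid-allocation. */
		page = alloc_pages(gfp, order);
	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

	return page;
}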

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
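
/*
 * Example usage (illustrative, not part of the upstream header): kernel
 * threads that must be able to allocate from any node widen their mask
 * to every node with memory, e.g. during kthread setup:
 *
 *	set_mems_allowed(node_states[N_MEMORY]);
 */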

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}

static inline bool cpuset_cpu_is_isolated(int cpu)
{
	return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */