/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at the real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	/* Enable pre_enable first, per the ordering comment above. */
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	/* Disable in the reverse order of cpuset_inc(). */
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general, e.g. a movable-only node which cannot satisfy
 * any non-movable allocations (see update_nodemask). The page allocator
 * needs to make additional checks for such configurations, and this check
 * is meant to guard those checks without any overhead for sane
 * configurations.
 */
static inline bool cpusets_insane_config(void)
{
	return static_branch_unlikely(&cpusets_insane_config_key);
}
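
/*
 * Usage sketch (illustrative, not part of this API): callers are expected
 * to pair the static-branch guard with the real predicate, so that sane
 * configurations pay nothing. The __GFP_HARDWALL pairing below is an
 * assumption about the page allocator, not something defined here:
 *
 *	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
 *		... bail out / warn: no node can satisfy this request ...
 *	}
 */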

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_current_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
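
/*
 * Usage sketch (illustrative): skip zones outside the current task's
 * cpuset while walking a zonelist, in the style of the page allocator
 * fastpath. The iteration variables below are placeholders:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (!cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		... try to allocate from @zone ...
 *	}
 */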

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#ifdef CONFIG_CPUSETS_V1
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
#else
static inline void cpuset_memory_pressure_bump(void) { }
#endif

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void dl_rebuild_rd_accounting(void);
extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);
extern void cpuset_reset_sched_domains(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
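
/*
 * Usage sketch for the begin/retry pair (illustrative; try_alloc() is a
 * hypothetical stand-in for an allocation attempt against
 * cpuset_current_mems_allowed):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc(gfp_mask);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 *
 * A failure observed under a stale mems_allowed is retried rather than
 * reported: exactly the "artificial failure" the comments above describe.
 */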

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	/*
	 * Disable irqs so a read_mems_allowed_begin() reader running from
	 * interrupt context on this CPU cannot spin against a write
	 * section it has itself interrupted.
	 */
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
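
/*
 * Writer/reader interplay (sketch): a reader that sampled the seqcount
 * before the update above sees a mismatch afterwards and restarts:
 *
 *	CPU0					CPU1
 *	cookie = read_mems_allowed_begin();
 *						set_mems_allowed(new_mask);
 *	... operate on stale mems_allowed ...
 *	read_mems_allowed_retry(cookie);	// returns true -> retry
 */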

extern bool cpuset_node_allowed(struct cgroup *cgroup, int nid);
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}

static inline bool cpuset_cpu_is_isolated(int cpu)
{
	return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void dl_rebuild_rd_accounting(void)
{
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_reset_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

static inline bool cpuset_node_allowed(struct cgroup *cgroup, int nid)
{
	return true;
}
#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */