/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number. In general,
 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
 */
#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>

/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

/**
 * cpumask_bits - get the bits in a cpumask
 * @maskp: the struct cpumask *
 *
 * You should only assume nr_cpu_ids bits of this mask are valid. This is
 * a macro so it's const-correct.
 */
#define cpumask_bits(maskp) ((maskp)->bits)

/**
 * cpumask_pr_args - printf args to output a cpumask
 * @maskp: cpumask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
 */
#define cpumask_pr_args(maskp)	nr_cpu_ids, cpumask_bits(maskp)

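/*
 * Illustrative sketch (not part of the kernel API): cpumask_pr_args()
 * expands to the field-width/pointer pair expected by the '%*pb' and
 * '%*pbl' printk format specifiers, e.g.
 *
 *	pr_info("possible CPUs: %*pbl\n", cpumask_pr_args(cpu_possible_mask));
 *
 * prints the possible CPUs as a ranged list such as "0-7".
 */
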
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
extern unsigned int nr_cpu_ids;
#endif

static inline void set_nr_cpu_ids(unsigned int nr)
{
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
	WARN_ON(nr != nr_cpu_ids);
#else
	nr_cpu_ids = nr;
#endif
}

/*
 * We have several different "preferred sizes" for the cpumask
 * operations, depending on operation.
 *
 * For example, the bitmap scanning and operating operations have
 * optimized routines that work for the single-word case, but only when
 * the size is constant. So if NR_CPUS fits in one single word, we are
 * better off using that small constant, in order to trigger the
 * optimized bit finding. That is 'small_cpumask_bits'.
 *
 * The clearing and copying operations will similarly perform better
 * with a constant size, but we limit that size arbitrarily to four
 * words. We call this 'large_cpumask_bits'.
 *
 * Finally, some operations just want the exact limit, either because
 * they set bits or just don't have any faster fixed-sized versions. We
 * call this just 'nr_cpumask_bits'.
 *
 * Note that these optional constants are always guaranteed to be at
 * least as big as 'nr_cpu_ids' itself is, and all our cpumask
 * allocations are at least that size (see cpumask_size()). The
 * optimization comes from being able to potentially use a compile-time
 * constant instead of a run-time generated exact number of CPUs.
 */
#if NR_CPUS <= BITS_PER_LONG
  #define small_cpumask_bits ((unsigned int)NR_CPUS)
  #define large_cpumask_bits ((unsigned int)NR_CPUS)
#elif NR_CPUS <= 4*BITS_PER_LONG
  #define small_cpumask_bits nr_cpu_ids
  #define large_cpumask_bits ((unsigned int)NR_CPUS)
#else
  #define small_cpumask_bits nr_cpu_ids
  #define large_cpumask_bits nr_cpu_ids
#endif
#define nr_cpumask_bits nr_cpu_ids
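
/*
 * Worked example (illustrative only), assuming BITS_PER_LONG == 64:
 *
 *	NR_CPUS == 64:   small_cpumask_bits == large_cpumask_bits == 64,
 *			 both compile-time constants.
 *	NR_CPUS == 256:  small_cpumask_bits == nr_cpu_ids (run-time),
 *			 large_cpumask_bits == 256.
 *	NR_CPUS == 8192: both are the run-time nr_cpu_ids.
 *
 * nr_cpumask_bits is always the exact limit, nr_cpu_ids (itself a
 * compile-time constant only with NR_CPUS == 1 or CONFIG_FORCE_NR_CPUS).
 */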

/*
 * The following particular system cpumasks and operations manage
 * possible, present, active and online cpus.
 *
 *     cpu_possible_mask - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_mask  - has bit 'cpu' set iff cpu is populated
 *     cpu_online_mask   - has bit 'cpu' set iff cpu available to scheduler
 *     cpu_active_mask   - has bit 'cpu' set iff cpu available to migration
 *
 *  If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
 *
 *  The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
 *  that might ever be plugged in at any time during the life of that
 *  system boot. The cpu_present_mask is dynamic(*), representing which
 *  CPUs are currently plugged in. And cpu_online_mask is the dynamic
 *  subset of cpu_present_mask, indicating those CPUs available for
 *  scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_mask is just a copy of cpu_possible_mask.
 *
 *  (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
 *      hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode the
 *    assumption that their single CPU is online. The UP
 *    cpu_{online,possible,present}_masks are placebos. Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case. This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 */

extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask   ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask  ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask   ((const struct cpumask *)&__cpu_active_mask)
#define cpu_dying_mask    ((const struct cpumask *)&__cpu_dying_mask)

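/*
 * Illustrative sketch (not part of the kernel API): the containment
 * described above can be spelled out with the accessors defined further
 * down in this file, e.g.
 *
 *	WARN_ON(!cpumask_subset(cpu_online_mask, cpu_present_mask));
 *	WARN_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
 *
 * online CPUs are a subset of the present ones, which in turn are a
 * subset of the possible ones.
 */
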
extern atomic_t __num_online_cpus;

extern cpumask_t cpus_booted_once_mask;

static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
}

/* verify cpu argument to cpumask_* operators */
static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
	cpu_max_bits_warn(cpu, small_cpumask_bits);
	return cpu;
}

/**
 * cpumask_first - get the first cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_first_zero - get the first unset cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if all cpus are set.
 */
static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
	return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
 * @srcp1: the first input
 * @srcp2: the second input
 *
 * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
 */
static inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
	return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}

/**
 * cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
 * @srcp1: the first input
 * @srcp2: the second input
 * @srcp3: the third input
 *
 * Return: >= nr_cpu_ids if no cpus set in all.
 */
static inline
unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
				   const struct cpumask *srcp2,
				   const struct cpumask *srcp3)
{
	return find_first_and_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
				      cpumask_bits(srcp3), small_cpumask_bits);
}

/**
 * cpumask_last - get the last CPU in a cpumask
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpumask_bits if no CPUs set.
 */
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
	return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no further cpus set.
 */
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
}

/**
 * cpumask_next_zero - get the next unset cpu in a cpumask
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Return: >= nr_cpu_ids if no further cpus unset.
 */
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_zero_bit(cpumask_bits(srcp), small_cpumask_bits, n+1);
}

#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
	return 0;
}

static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
						      const struct cpumask *src2p)
{
	return cpumask_first_and(src1p, src2p);
}

static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	return cpumask_first(srcp);
}
#else
unsigned int cpumask_local_spread(unsigned int i, int node);
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p);
unsigned int cpumask_any_distribute(const struct cpumask *srcp);
#endif /* NR_CPUS */

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Return: >= nr_cpu_ids if no further cpus set in both.
 */
static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
				 small_cpumask_bits, n + 1);
}

/**
 * for_each_cpu - iterate over every cpu in a mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu(cpu, mask)				\
	for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)

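/*
 * Illustrative sketch (not part of the kernel API): counting the online
 * CPUs by hand with for_each_cpu(). The function name is hypothetical.
 *
 *	static unsigned int example_count_online_cpus(void)
 *	{
 *		unsigned int cpu, n = 0;
 *
 *		for_each_cpu(cpu, cpu_online_mask)
 *			n++;
 *
 *		return n;	// momentary snapshot, like num_online_cpus()
 *	}
 */
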
#if NR_CPUS == 1
static inline
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	cpumask_check(start);
	if (n != -1)
		cpumask_check(n);

	/*
	 * Return the first available CPU when wrapping, or when starting before cpu0,
	 * since there is only one valid option.
	 */
	if (wrap && n >= 0)
		return nr_cpumask_bits;

	return cpumask_first(mask);
}
#else
unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
#endif

/**
 * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 * @start: the start location
 *
 * The implementation does not assume any bit in @mask is set (including @start).
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_wrap(cpu, mask, start)				\
	for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)

/**
 * for_each_cpu_and - iterate over every cpu in both masks
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places. It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_and(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_and(cpu, mask1, mask2)				\
	for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)

/**
 * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
 *			 those present in another.
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places. It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_andnot(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_andnot(cpu, mask1, mask2)				\
	for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)

/**
 * for_each_cpu_or - iterate over every cpu present in either mask
 * @cpu: the (optionally unsigned) integer iterator
 * @mask1: the first cpumask pointer
 * @mask2: the second cpumask pointer
 *
 * This saves a temporary CPU mask in many places. It is equivalent to:
 *	struct cpumask tmp;
 *	cpumask_or(&tmp, &mask1, &mask2);
 *	for_each_cpu(cpu, &tmp)
 *		...
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_or(cpu, mask1, mask2)				\
	for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)

/**
 * for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask.
 * @cpu: the (optionally unsigned) integer iterator
 * @mask: the cpumask pointer
 *
 * After the loop, cpu is >= nr_cpu_ids.
 */
#define for_each_cpu_from(cpu, mask)				\
	for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Return: >= nr_cpu_ids if no cpus set.
 */
static inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}

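/*
 * Illustrative sketch (not part of the kernel API): the common pattern of
 * picking some other online CPU to hand work to. The variable name is
 * hypothetical.
 *
 *	unsigned int target;
 *
 *	target = cpumask_any_but(cpu_online_mask, smp_processor_id());
 *	if (target >= nr_cpu_ids)
 *		;	// no other CPU is online, handle the work locally
 */
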
/**
 * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one.
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 * @cpu: the cpu to ignore
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
static inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
				 const struct cpumask *mask2,
				 unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	i = cpumask_first_and(mask1, mask2);
	if (i != cpu)
		return i;

	return cpumask_next_and(cpu, mask1, mask2);
}

/**
 * cpumask_nth - get the Nth cpu in a cpumask
 * @srcp: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
	return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
}

/**
 * cpumask_nth_and - get the Nth cpu in 2 cpumasks
 * @srcp1: the cpumask pointer
 * @srcp2: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
			     const struct cpumask *srcp2)
{
	return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
				small_cpumask_bits, cpumask_check(cpu));
}

/**
 * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
 * @srcp1: the cpumask pointer
 * @srcp2: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
				const struct cpumask *srcp2)
{
	return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
				   small_cpumask_bits, cpumask_check(cpu));
}

/**
 * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
 * @srcp1: the cpumask pointer
 * @srcp2: the cpumask pointer
 * @srcp3: the cpumask pointer
 * @cpu: the Nth cpu to find, starting from 0
 *
 * Return: >= nr_cpu_ids if such cpu doesn't exist.
 */
static __always_inline
unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
				    const struct cpumask *srcp2,
				    const struct cpumask *srcp3)
{
	return find_nth_and_andnot_bit(cpumask_bits(srcp1),
				       cpumask_bits(srcp2),
				       cpumask_bits(srcp3),
				       small_cpumask_bits, cpumask_check(cpu));
}

#define CPU_BITS_NONE						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL			\
}

#define CPU_BITS_CPU0						\
{								\
	[0] = 1UL						\
}

/**
 * cpumask_set_cpu - set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}


/**
 * cpumask_clear_cpu - clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * cpumask_assign_cpu - assign a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 * @value: the value to assign
 */
static __always_inline void cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
{
	assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
}

static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
{
	__assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
}

/**
 * cpumask_test_cpu - test for a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * Return: true if @cpu is set in @cpumask, else returns false
 */
static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
	return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}

/**
 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * test_and_set_bit wrapper for cpumasks.
 *
 * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
 */
static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
 * @cpu: cpu number (< nr_cpu_ids)
 * @cpumask: the cpumask pointer
 *
 * test_and_clear_bit wrapper for cpumasks.
 *
 * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
 */
static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
	return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}

/**
 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_setall(struct cpumask *dstp)
{
	if (small_const_nbits(small_cpumask_bits)) {
		cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
		return;
	}
	bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear(struct cpumask *dstp)
{
	bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}

/**
 * cpumask_and - *dstp = *src1p & *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: false if *@dstp is empty, else returns true
 */
static inline bool cpumask_and(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
			  cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_or - *dstp = *src1p | *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
			      const struct cpumask *src2p)
{
	bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
		  cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_xor - *dstp = *src1p ^ *src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 */
static inline void cpumask_xor(struct cpumask *dstp,
			       const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
		   cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_andnot - *dstp = *src1p & ~*src2p
 * @dstp: the cpumask result
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: false if *@dstp is empty, else returns true
 */
static inline bool cpumask_andnot(struct cpumask *dstp,
				  const struct cpumask *src1p,
				  const struct cpumask *src2p)
{
	return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
			     cpumask_bits(src2p), small_cpumask_bits);
}

/**
 * cpumask_equal - *src1p == *src2p
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: true if the cpumasks are equal, false if not
 */
static inline bool cpumask_equal(const struct cpumask *src1p,
				 const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			    small_cpumask_bits);
}

/**
 * cpumask_or_equal - *src1p | *src2p == *src3p
 * @src1p: the first input
 * @src2p: the second input
 * @src3p: the third input
 *
 * Return: true if first cpumask ORed with second cpumask == third cpumask,
 *	   otherwise false
 */
static inline bool cpumask_or_equal(const struct cpumask *src1p,
				    const struct cpumask *src2p,
				    const struct cpumask *src3p)
{
	return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			       cpumask_bits(src3p), small_cpumask_bits);
}

/**
 * cpumask_intersects - (*src1p & *src2p) != 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: true if first cpumask ANDed with second cpumask is non-empty,
 *	   otherwise false
 */
static inline bool cpumask_intersects(const struct cpumask *src1p,
				      const struct cpumask *src2p)
{
	return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
				 small_cpumask_bits);
}

/**
 * cpumask_subset - (*src1p & ~*src2p) == 0
 * @src1p: the first input
 * @src2p: the second input
 *
 * Return: true if *@src1p is a subset of *@src2p, else returns false
 */
static inline bool cpumask_subset(const struct cpumask *src1p,
				  const struct cpumask *src2p)
{
	return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
			     small_cpumask_bits);
}

/**
 * cpumask_empty - *srcp == 0
 * @srcp: the cpumask in which all cpus < nr_cpu_ids are checked for being clear.
 *
 * Return: true if srcp is empty (has no bits set), else false
 */
static inline bool cpumask_empty(const struct cpumask *srcp)
{
	return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_full - *srcp == 0xFFFFFFFF...
 * @srcp: the cpumask in which all cpus < nr_cpu_ids are checked for being set.
 *
 * Return: true if srcp is full (has all bits set), else false
 */
static inline bool cpumask_full(const struct cpumask *srcp)
{
	return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}

/**
 * cpumask_weight - Count of bits in *srcp
 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
 *
 * Return: count of bits set in *srcp
 */
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
	return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
}

/**
 * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
 * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
 * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
 *
 * Return: count of bits set in both *srcp1 and *srcp2
 */
static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
					      const struct cpumask *srcp2)
{
	return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}

/**
 * cpumask_weight_andnot - Count of bits in (*srcp1 & ~*srcp2)
 * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
 * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
 *
 * Return: count of bits set in *srcp1 and cleared in *srcp2
 */
static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
						 const struct cpumask *srcp2)
{
	return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}

/**
 * cpumask_shift_right - *dstp = *srcp >> n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_right(struct cpumask *dstp,
				       const struct cpumask *srcp, int n)
{
	bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
			   small_cpumask_bits);
}

/**
 * cpumask_shift_left - *dstp = *srcp << n
 * @dstp: the cpumask result
 * @srcp: the input to shift
 * @n: the number of bits to shift by
 */
static inline void cpumask_shift_left(struct cpumask *dstp,
				      const struct cpumask *srcp, int n)
{
	bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
			  nr_cpumask_bits);
}

/**
 * cpumask_copy - *dstp = *srcp
 * @dstp: the result
 * @srcp: the input cpumask
 */
static inline void cpumask_copy(struct cpumask *dstp,
				const struct cpumask *srcp)
{
	bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}

/**
 * cpumask_any - pick a "random" cpu from *srcp
 * @srcp: the input cpumask
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any(srcp) cpumask_first(srcp)

/**
 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
 * @mask1: the first input cpumask
 * @mask2: the second input cpumask
 *
 * Return: >= nr_cpu_ids if no cpus set.
 */
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))

/**
 * cpumask_of - the cpumask containing just a given cpu
 * @cpu: the cpu (< nr_cpu_ids)
 */
#define cpumask_of(cpu) (get_cpu_mask(cpu))

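/*
 * Illustrative sketch (not part of the kernel API): cpumask_of() yields a
 * constant mask with exactly one bit set, handy as a ready-made argument:
 *
 *	if (cpumask_equal(mask, cpumask_of(cpu)))
 *		;	// @mask contains only @cpu
 */
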
/**
 * cpumask_parse_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static inline int cpumask_parse_user(const char __user *buf, int len,
				     struct cpumask *dstp)
{
	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpumask_parselist_user - extract a cpumask from a user string
 * @buf: the buffer to extract from
 * @len: the length of the buffer
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static inline int cpumask_parselist_user(const char __user *buf, int len,
					 struct cpumask *dstp)
{
	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
				     nr_cpumask_bits);
}

/**
 * cpumask_parse - extract a cpumask from a string
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}

/**
 * cpulist_parse - extract a cpumask from a string of ranges
 * @buf: the buffer to extract from
 * @dstp: the cpumask to set.
 *
 * Return: -errno, or 0 for success.
 */
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}

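/*
 * Illustrative sketch (not part of the kernel API): parsing a list-format
 * string, e.g. from a module parameter. The variable names are hypothetical.
 *
 *	cpumask_var_t mask;
 *	int err;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	err = cpulist_parse("0-3,8", mask);	// sets CPUs 0,1,2,3 and 8
 *	...
 *	free_cpumask_var(mask);
 */
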
/**
 * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
 *
 * Return: size to allocate for a &struct cpumask in bytes
 */
static inline unsigned int cpumask_size(void)
{
	return bitmap_size(large_cpumask_bits);
}

/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Oh, the wicked games we play! In order to make kernel coding a
 * little more difficult, we typedef cpumask_var_t to an array or a
 * pointer: doing &mask on an array is a noop, so it still works.
 *
 * i.e.
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	... use 'tmpmask' like a normal struct cpumask * ...
 *
 *	free_cpumask_var(tmpmask);
 *
 *
 * However, there is one notable exception. alloc_cpumask_var() allocates
 * only nr_cpumask_bits bits (whereas a real cpumask_t always has NR_CPUS
 * bits). Therefore you must not dereference cpumask_var_t:
 *
 *	cpumask_var_t tmpmask;
 *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	var = *tmpmask;
 *
 * This code performs a memcpy of NR_CPUS bits and can cause memory
 * corruption. cpumask_copy() provides safe copy functionality.
 *
 * Note that there is another evil here: If you define a cpumask_var_t
 * as a percpu variable then the way to obtain the address of the cpumask
 * structure differs, which influences which this_cpu_* operation must be
 * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
 * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
 * other type of cpumask_var_t implementation is configured.
 *
 * Please also note that __cpumask_var_read_mostly can be used to declare
 * a cpumask_var_t variable itself (not its content) as read mostly.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;

#define this_cpu_cpumask_var_ptr(x)	this_cpu_read(x)
#define __cpumask_var_read_mostly	__read_mostly

bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);

static inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 *
 * Return: %true if allocation succeeded, %false if not
 */
static inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}

static inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}

void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);

static inline bool cpumask_available(cpumask_var_t mask)
{
	return mask != NULL;
}

#else
typedef struct cpumask cpumask_var_t[1];

#define this_cpu_cpumask_var_ptr(x)	this_cpu_ptr(x)
#define __cpumask_var_read_mostly

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;
}

static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					  int node)
{
	return true;
}

static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);
	return true;
}

static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
					   int node)
{
	cpumask_clear(*mask);
	return true;
}

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}

static inline bool cpumask_available(cpumask_var_t mask)
{
	return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));

/* It's common to want to use cpu_all_mask in struct member initializers,
 * so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define cpu_all_mask to_cpumask(cpu_all_bits)

/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])

#if NR_CPUS == 1
/* Uniprocessor: the possible/online/present masks are always "1" */
#define for_each_possible_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_online_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_present_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
#else
#define for_each_possible_cpu(cpu)	for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu)	for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu)	for_each_cpu((cpu), cpu_present_mask)
#endif

/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

static inline void
set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, &__cpu_possible_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_possible_mask);
}

static inline void
set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, &__cpu_present_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_present_mask);
}

void set_cpu_online(unsigned int cpu, bool online);

static inline void
set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, &__cpu_active_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_active_mask);
}

static inline void
set_cpu_dying(unsigned int cpu, bool dying)
{
	if (dying)
		cpumask_set_cpu(cpu, &__cpu_dying_mask);
	else
		cpumask_clear_cpu(cpu, &__cpu_dying_mask);
}

/**
 * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)						\
	((struct cpumask *)(1 ? (bitmap)				\
			    : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
	return 1;
}

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
	cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}

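/*
 * Worked example of the offsetting above (illustrative only), assuming
 * BITS_PER_LONG == 64 and NR_CPUS > 64 so that cpu == 70 is a valid index:
 * the table is generated so that row 1 + x has bit x set in its first word.
 * For cpu == 70, p first points at the row with bit 6 (70 % 64) set in word
 * 0; stepping p back by one word (70 / 64) means that, read as a bitmap
 * starting at p, that set bit now sits in word 1, i.e. at overall position
 * 70 - exactly bit 'cpu'. The zero words of the neighbouring rows provide
 * the padding mentioned above.
 */
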
#if NR_CPUS > 1
/**
 * num_online_cpus() - Read the number of online CPUs
 *
 * Despite the fact that __num_online_cpus is of type atomic_t, this
 * interface gives only a momentary snapshot and is not protected against
 * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
 * region.
 *
 * Return: momentary snapshot of the number of online CPUs
 */
static __always_inline unsigned int num_online_cpus(void)
{
	return raw_atomic_read(&__num_online_cpus);
}
#define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
#define num_present_cpus()	cpumask_weight(cpu_present_mask)
#define num_active_cpus()	cpumask_weight(cpu_active_mask)

static inline bool cpu_online(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_online_mask);
}

static inline bool cpu_possible(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_possible_mask);
}

static inline bool cpu_present(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_present_mask);
}

static inline bool cpu_active(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_active_mask);
}

static inline bool cpu_dying(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, cpu_dying_mask);
}

#else

#define num_online_cpus()	1U
#define num_possible_cpus()	1U
#define num_present_cpus()	1U
#define num_active_cpus()	1U

static inline bool cpu_online(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_possible(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_present(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_active(unsigned int cpu)
{
	return cpu == 0;
}

static inline bool cpu_dying(unsigned int cpu)
{
	return false;
}

#endif /* NR_CPUS > 1 */

#define cpu_is_offline(cpu)	unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL						\
{								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL						\
{								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,		\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
}
#endif /* NR_CPUS > BITS_PER_LONG */

/**
 * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
 *	as comma-separated list of cpus or hex values of cpumask
 * @list: indicates whether the cpumap must be a list
 * @mask: the cpumask to copy
 * @buf: the buffer to copy into
 *
 * Return: the length of the (null-terminated) @buf string, zero if
 * nothing is copied.
 */
static inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
	return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
				       nr_cpu_ids);
}

/**
 * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
 *	hex values of cpumask
 *
 * @buf: the buffer to copy into
 * @mask: the cpumask to copy
 * @off: in the string from which we are copying, we copy to @buf
 * @count: the maximum number of bytes to print
 *
 * The function prints the cpumask into the buffer as hex values of
 * cpumask; Typically used by bin_attribute to export cpumask bitmask
 * ABI.
 *
 * Return: the length of how many bytes have been copied, excluding
 * terminating '\0'.
 */
static inline ssize_t
cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
			    loff_t off, size_t count)
{
	return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
					   nr_cpu_ids, off, count) - 1;
}

/**
 * cpumap_print_list_to_buf - copies the cpumask into the buffer as
 *	comma-separated list of cpus
 * @buf: the buffer to copy into
 * @mask: the cpumask to copy
 * @off: in the string from which we are copying, we copy to @buf
 * @count: the maximum number of bytes to print
 *
 * Everything is the same as with the above cpumap_print_bitmask_to_buf()
 * except the print format.
 *
 * Return: the length of how many bytes have been copied, excluding
 * terminating '\0'.
 */
static inline ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
			 loff_t off, size_t count)
{
	return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
					nr_cpu_ids, off, count) - 1;
}

#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#else
#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS)	\
} }
#endif /* NR_CPUS > BITS_PER_LONG */

#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL				\
} }

#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] = 1UL							\
} }

/*
 * Provide a valid theoretical max size for cpumap and cpulist sysfs files
 * to avoid breaking userspace which may allocate a buffer based on the size
 * reported by e.g. fstat.
 *
 * for cpumap NR_CPUS * 9/32 - 1 should be an exact length.
 *
 * For cpulist 7 is (ceil(log10(NR_CPUS)) + 1) allowing for NR_CPUS to be up
 * to 2 orders of magnitude larger than 8192. And then we divide by 2 to
 * cover a worst-case of every other cpu being on one of two nodes for a
 * very large NR_CPUS.
 *
 * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
 * unsigned comparison to -1.
 */
#define CPUMAP_FILE_MAX_BYTES  (((NR_CPUS * 9)/32 > PAGE_SIZE) \
					? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES  (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
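
/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096): with
 * NR_CPUS == 16384 the bitmask format needs 16384 * 9/32 - 1 = 4607 bytes
 * (512 comma-separated 32-bit chunks of 8 hex digits each, with no trailing
 * separator), so CPUMAP_FILE_MAX_BYTES is 4607 and CPULIST_FILE_MAX_BYTES
 * is 16384 * 7/2 = 57344. With NR_CPUS == 64 both fall back to PAGE_SIZE.
 */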

#endif /* __LINUX_CPUMASK_H */