// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
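
/*
 * Usage sketch (illustrative, not part of this file): cpumask_next_wrap()
 * is the helper behind the for_each_cpu_wrap() iterator in
 * <linux/cpumask.h>, which walks every cpu in a mask starting from an
 * arbitrary offset. A hypothetical caller that prefers to begin scanning
 * at the current cpu instead of cpu 0 might do:
 *
 *	unsigned int cpu;
 *
 *	for_each_cpu_wrap(cpu, cpu_online_mask, smp_processor_id()) {
 *		if (try_to_claim(cpu))	// try_to_claim() is made up here
 *			break;
 *	}
 *
 * Each set bit is visited exactly once; the iteration terminates when
 * the walk wraps back past its starting point.
 */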

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
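
/*
 * Usage sketch (illustrative): the usual calling pattern treats
 * allocation failure as -ENOMEM and releases the mask with
 * free_cpumask_var(), defined below. zalloc_cpumask_var() is the
 * zero-filling wrapper from <linux/cpumask.h>:
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(tmp, cpu_online_mask);
 *	... use tmp ...
 *	free_cpumask_var(tmp);
 *
 * With CONFIG_CPUMASK_OFFSTACK=n the same code compiles down to a
 * stack variable and the alloc/free calls become no-ops.
 */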

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}
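
/*
 * Usage sketch (illustrative): boot-time users call this from __init
 * code, before the slab allocator is up, and hand the mask back with
 * free_bootmem_cpumask_var() once it is no longer needed:
 *
 *	static cpumask_var_t boot_mask;		// hypothetical
 *
 *	void __init my_early_setup(void)	// hypothetical __init hook
 *	{
 *		alloc_bootmem_cpumask_var(&boot_mask);
 *		cpumask_copy(boot_mask, cpu_possible_mask);
 *	}
 *
 * There is no failure path for the caller to handle: on allocation
 * failure the system panics rather than returning NULL.
 */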

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif

#if NR_CPUS > 1
/**
 * cpumask_local_spread - select the i'th cpu with local numa cpu's first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
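
/*
 * Usage sketch (illustrative): a typical caller is driver setup code
 * spreading per-queue interrupt affinity across CPUs, preferring those
 * local to the device's NUMA node:
 *
 *	unsigned int q, cpu;
 *
 *	for (q = 0; q < nr_queues; q++) {	// nr_queues is hypothetical
 *		cpu = cpumask_local_spread(q, dev_to_node(dev));
 *		irq_set_affinity_hint(queue_irq(q), cpumask_of(cpu));	// queue_irq() made up
 *	}
 *
 * Because @i is taken modulo num_online_cpus(), the mapping wraps and
 * every queue gets some online cpu even when nr_queues exceeds the
 * number of CPUs.
 */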

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
 * @src1p: first &struct cpumask for intersection
 * @src2p: second &struct cpumask for intersection
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
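
/*
 * Usage sketch (illustrative): unlike cpumask_any_and(), which tends to
 * return the same (first) cpu on every call, repeated calls here rotate
 * through the intersection, spreading placement decisions:
 *
 *	int cpu = cpumask_any_and_distribute(p->cpus_ptr, cpu_online_mask);
 *
 *	if (cpu < nr_cpu_ids)
 *		...	// got a cpu; successive calls pick different ones
 *
 * The rotation state is per-cpu (distribute_cpu_mask_prev above), so
 * callers running on different CPUs rotate independently.
 */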

unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next(prev, srcp);
	if (next >= nr_cpu_ids)
		next = cpumask_first(srcp);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
#endif /* NR_CPUS */