/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 * Generic SMP support
 * Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	union {
		struct __call_single_node node;
		struct {
			struct llist_node llist;
			unsigned int flags;
		};
	};
	smp_call_func_t func;
	void *info;
};

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

/*
 * Enqueue a llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
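
/*
 * Example (illustrative sketch, not part of this header): run func on one
 * specific CPU and, with wait != 0, block until it has completed there.
 * read_remote_state and struct my_state are hypothetical names.
 *
 *	static void read_remote_state(void *info)
 *	{
 *		struct my_state *s = info;	// runs on the target CPU
 *		...
 *	}
 *
 *	struct my_state s;
 *	if (smp_call_function_single(2, read_remote_state, &s, 1))
 *		...	// error, e.g. CPU 2 is not online
 */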

/*
 * Call a function on all processors
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait);
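
/*
 * Example (illustrative sketch): invoke a handler on every online CPU and
 * wait for completion; do_flush is a hypothetical callback. The handler
 * runs with interrupts disabled on each CPU, so it must not sleep.
 *
 *	static void do_flush(void *info)
 *	{
 *		...	// short, non-sleeping per-CPU work
 *	}
 *
 *	on_each_cpu(do_flush, NULL, 1);
 */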

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		void *info, bool wait);
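
/*
 * Example (illustrative sketch): restrict the same hypothetical do_flush
 * callback to the CPUs that have used a given mm, a typical TLB-shootdown
 * pattern; 'mm' is assumed to be a valid struct mm_struct pointer.
 *
 *	on_each_cpu_mask(mm_cpumask(mm), do_flush, NULL, true);
 */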

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local processor.
 */
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait);

void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);
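
/*
 * Example (illustrative sketch): only interrupt CPUs that actually have
 * pending work; cpu_has_work, struct my_state and its pending[] array are
 * hypothetical. cond_func is evaluated for each candidate CPU before any
 * IPI is sent.
 *
 *	static bool cpu_has_work(int cpu, void *info)
 *	{
 *		struct my_state *st = info;
 *
 *		return st->pending[cpu];
 *	}
 *
 *	on_each_cpu_cond(cpu_has_work, do_flush, &state, true);
 */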

int smp_call_function_single_async(int cpu, call_single_data_t *csd);
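
/*
 * Example (illustrative sketch): fire-and-forget cross-CPU call with a
 * caller-owned csd. The csd must remain valid, and must not be reused,
 * until func has finished running; my_csd_func is hypothetical.
 *
 *	static void my_csd_func(void *info)
 *	{
 *		...	// runs on the target CPU in interrupt context
 *	}
 *
 *	static call_single_data_t my_csd = {
 *		.func = my_csd_func,
 *	};
 *
 *	smp_call_function_single_async(target_cpu, &my_csd);
 */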

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);
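
/*
 * Example (illustrative sketch): these variants skip the calling CPU, so
 * handle it locally; smp_call_function_many() must be called with
 * preemption disabled. do_flush is the hypothetical callback from above.
 *
 *	preempt_disable();
 *	smp_call_function_many(cpu_online_mask, do_flush, NULL, false);
 *	do_flush(NULL);		// local CPU is not in "all other CPUs"
 *	preempt_enable();
 */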

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 * These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions
 * and WARN when smp_processor_id() is used while the CPU id is not
 * stable.
 */

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
 * regular asm read for the stable.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
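
/*
 * Example (illustrative sketch): between get_cpu() and put_cpu()
 * preemption is disabled, so the returned CPU id stays stable and this
 * CPU's data can be used safely; the critical section must not sleep.
 *
 *	int cpu = get_cpu();
 *
 *	...	// use 'cpu' / this CPU's per-CPU data
 *
 *	put_cpu();
 */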

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
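
/*
 * Example (illustrative sketch): unlike the IPI-based calls above, the
 * callback here runs from process context on the target CPU, so it may
 * sleep, and its int return value is passed back to the caller;
 * do_probe, my_dev_read and 'dev' are hypothetical.
 *
 *	static int do_probe(void *par)
 *	{
 *		struct my_dev *dev = par;
 *
 *		return my_dev_read(dev);	// may sleep
 *	}
 *
 *	int ret = smp_call_on_cpu(cpu, do_probe, dev, false);
 */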

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */