Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef __LINUX_PREEMPT_H |
3 | #define __LINUX_PREEMPT_H | |
4 | ||
5 | /* | |
6 | * include/linux/preempt.h - macros for accessing and manipulating | |
7 | * preempt_count (used for kernel preemption, interrupt count, etc.) | |
8 | */ | |
9 | ||
1da177e4 | 10 | #include <linux/linkage.h> |
54da6a09 | 11 | #include <linux/cleanup.h> |
2b010a69 | 12 | #include <linux/types.h> |
1da177e4 | 13 | |
92cf2118 FW |
14 | /* |
15 | * We put the hardirq and softirq counter into the preemption | |
16 | * counter. The bitmask has the following meaning: | |
17 | * | |
18 | * - bits 0-7 are the preemption count (max preemption depth: 256) | |
19 | * - bits 8-15 are the softirq count (max # of softirqs: 256) | |
20 | * | |
21 | * The hardirq count could in theory be the same as the number of | |
22 | * interrupts in the system, but we run all interrupt handlers with | |
23 | * interrupts disabled, so we cannot have nesting interrupts. Though | |
24 | * there are a few palaeontologic drivers which reenable interrupts in | |
25 | * the handler, so we need more than one bit here. | |
26 | * | |
2e10e71c FW |
27 | * PREEMPT_MASK: 0x000000ff |
28 | * SOFTIRQ_MASK: 0x0000ff00 | |
29 | * HARDIRQ_MASK: 0x000f0000 | |
69ea03b5 | 30 | * NMI_MASK: 0x00f00000 |
2e10e71c | 31 | * PREEMPT_NEED_RESCHED: 0x80000000 |
92cf2118 FW |
32 | */ |
33 | #define PREEMPT_BITS 8 | |
34 | #define SOFTIRQ_BITS 8 | |
35 | #define HARDIRQ_BITS 4 | |
69ea03b5 | 36 | #define NMI_BITS 4 |
92cf2118 FW |
37 | |
38 | #define PREEMPT_SHIFT 0 | |
39 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) | |
40 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) | |
41 | #define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) | |
42 | ||
43 | #define __IRQ_MASK(x) ((1UL << (x))-1) | |
44 | ||
45 | #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) | |
46 | #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | |
47 | #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) | |
48 | #define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) | |
49 | ||
50 | #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) | |
51 | #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) | |
52 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) | |
53 | #define NMI_OFFSET (1UL << NMI_SHIFT) | |
54 | ||
55 | #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) | |
56 | ||
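For illustration only, the masks and shifts above can be used to pull a raw preempt_count value apart into its fields. This is a minimal sketch and not part of the header; the helper name `preempt_count_decode` is hypothetical.

```c
#include <linux/preempt.h>
#include <linux/printk.h>

/* Hypothetical debug helper: split a raw preempt_count into its bit fields. */
static inline void preempt_count_decode(unsigned long pc)
{
	pr_info("preempt=%lu softirq=%lu hardirq=%lu nmi=%lu\n",
		(pc & PREEMPT_MASK) >> PREEMPT_SHIFT,
		(pc & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT,
		(pc & HARDIRQ_MASK) >> HARDIRQ_SHIFT,
		(pc & NMI_MASK) >> NMI_SHIFT);
}
```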
d04b0ad3 IM |
57 | #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) |
58 | ||
59 | /* | |
60 | * Disable preemption until the scheduler is running -- use an unconditional | |
61 | * value so that it also works on !PREEMPT_COUNT kernels. | |
62 | * | |
63 | * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). | |
64 | */ | |
65 | #define INIT_PREEMPT_COUNT PREEMPT_OFFSET | |
66 | ||
67 | /* | |
68 | * Initial preempt_count value; reflects the preempt_count schedule invariant | |
69 | * which states that during context switches: | |
70 | * | |
71 | * preempt_count() == 2*PREEMPT_DISABLE_OFFSET | |
72 | * | |
73 | * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. | |
74 | * Note: See finish_task_switch(). | |
75 | */ | |
76 | #define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) | |
77 | ||
2e10e71c FW |
78 | /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ |
79 | #include <asm/preempt.h> | |
80 | ||
91ebe8bc SRV |
81 | /** |
82 | * interrupt_context_level - return interrupt context level | |
83 | * | |
84 | * Returns the current interrupt context level. | |
85 | * 0 - normal context | |
86 | * 1 - softirq context | |
87 | * 2 - hardirq context | |
88 | * 3 - NMI context | |
89 | */ | |
90 | static __always_inline unsigned char interrupt_context_level(void) | |
91 | { | |
92 | unsigned long pc = preempt_count(); | |
93 | unsigned char level = 0; | |
94 | ||
95 | level += !!(pc & (NMI_MASK)); | |
96 | level += !!(pc & (NMI_MASK | HARDIRQ_MASK)); | |
97 | level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)); | |
98 | ||
99 | return level; | |
100 | } | |
101 | ||
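The additive tests above mean: in NMI context all three expressions are non-zero (level 3), in a hardirq handler the last two are (level 2), while serving a softirq only the last one is (level 1), and in plain task context none are (level 0). A hedged usage sketch follows, with a hypothetical per-CPU statistics array.

```c
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, ctx_hits[4]);	/* hypothetical counters */

/* Count how often we run in each context: task/softirq/hardirq/NMI. */
static void count_context_hit(void)
{
	__this_cpu_inc(ctx_hits[interrupt_context_level()]);
}
```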
87c3a589 FT |
102 | /* |
103 | * These macro definitions avoid redundant invocations of preempt_count() | |
104 | * because such invocations would result in redundant loads given that | |
105 | * preempt_count() is commonly implemented with READ_ONCE(). | |
106 | */ | |
107 | ||
15115830 | 108 | #define nmi_count() (preempt_count() & NMI_MASK) |
92cf2118 | 109 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) |
728b478d TG |
110 | #ifdef CONFIG_PREEMPT_RT |
111 | # define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK) | |
87c3a589 | 112 | # define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count()) |
728b478d TG |
113 | #else |
114 | # define softirq_count() (preempt_count() & SOFTIRQ_MASK) | |
87c3a589 | 115 | # define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK)) |
728b478d | 116 | #endif |
92cf2118 FW |
117 | |
118 | /* | |
15115830 | 119 | * Macros to retrieve the current execution context: |
7c478895 | 120 | * |
15115830 TG |
121 | * in_nmi() - We're in NMI context |
122 | * in_hardirq() - We're in hard IRQ context | |
123 | * in_serving_softirq() - We're in softirq context | |
124 | * in_task() - We're in task context | |
125 | */ | |
126 | #define in_nmi() (nmi_count()) | |
127 | #define in_hardirq() (hardirq_count()) | |
128 | #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) | |
87c3a589 FT |
129 | #ifdef CONFIG_PREEMPT_RT |
130 | # define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq())) | |
131 | #else | |
132 | # define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) | |
133 | #endif | |
15115830 TG |
134 | |
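A common pattern built on these helpers is selecting a sleeping or an atomic code path. The sketch below is purely illustrative and assumes the caller never runs in NMI context, where allocating is not allowed at all; the function name is hypothetical.

```c
#include <linux/slab.h>

/* Hypothetical helper: pick the allocation mode from the current context. */
static void *ctx_aware_alloc(size_t size)
{
	if (in_task())
		return kmalloc(size, GFP_KERNEL);	/* process context: may sleep */

	return kmalloc(size, GFP_ATOMIC);		/* hardirq/softirq: must not sleep */
}
```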
135 | /* | |
136 | * The following macros are deprecated and should not be used in new code: | |
137 | * in_irq() - Obsolete version of in_hardirq() | |
7c478895 PZ |
138 | * in_softirq() - We have BH disabled, or are processing softirqs |
139 | * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled | |
92cf2118 FW |
140 | */ |
141 | #define in_irq() (hardirq_count()) | |
142 | #define in_softirq() (softirq_count()) | |
143 | #define in_interrupt() (irq_count()) | |
92cf2118 | 144 | |
fe32d3cd KK |
145 | /* |
146 | * The preempt_count offset after preempt_disable(); | |
147 | */ | |
92cf2118 | 148 | #if defined(CONFIG_PREEMPT_COUNT) |
fe32d3cd | 149 | # define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET |
92cf2118 | 150 | #else |
fe32d3cd | 151 | # define PREEMPT_DISABLE_OFFSET 0 |
92cf2118 FW |
152 | #endif |
153 | ||
fe32d3cd KK |
154 | /* |
155 | * The preempt_count offset after spin_lock() | |
156 | */ | |
015680aa | 157 | #if !defined(CONFIG_PREEMPT_RT) |
3e9cc688 | 158 | #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET |
015680aa | 159 | #else |
3e9cc688 TG |
160 | /* Locks on RT do not disable preemption */ |
161 | #define PREEMPT_LOCK_OFFSET 0 | |
015680aa | 162 | #endif |
fe32d3cd | 163 | |
92cf2118 FW |
164 | /* |
165 | * The preempt_count offset needed for things like: | |
166 | * | |
167 | * spin_lock_bh() | |
168 | * | |
169 | * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and | |
170 | * softirqs, such that unlock sequences of: | |
171 | * | |
172 | * spin_unlock(); | |
173 | * local_bh_enable(); | |
174 | * | |
175 | * Work as expected. | |
176 | */ | |
fe32d3cd | 177 | #define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET) |
92cf2118 FW |
178 | |
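The unlock ordering that the comment above describes looks roughly like the following sketch; the lock and function are hypothetical.

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);		/* hypothetical lock */

static void demo_bh_lock_sequence(void)
{
	spin_lock_bh(&demo_lock);	/* adds SOFTIRQ_LOCK_OFFSET to preempt_count */
	/* ... critical section: BH off, and on !RT preemption off as well ... */
	spin_unlock(&demo_lock);	/* drops only the PREEMPT_LOCK_OFFSET part */
	local_bh_enable();		/* drops SOFTIRQ_DISABLE_OFFSET, may run softirqs */
}
```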
179 | /* | |
180 | * Are we running in atomic context? WARNING: this macro cannot | |
181 | * always detect atomic context; in particular, it cannot know about | |
182 | * held spinlocks in non-preemptible kernels. Thus it should not be | |
183 | * used in the general case to determine whether sleeping is possible. | |
184 | * Do not use in_atomic() in driver code. | |
185 | */ | |
3e51f3c4 | 186 | #define in_atomic() (preempt_count() != 0) |
92cf2118 FW |
187 | |
188 | /* | |
189 | * Check whether we were atomic before we did preempt_disable(): | |
e017cf21 | 190 | * (used by the scheduler) |
92cf2118 | 191 | */ |
da7142e2 | 192 | #define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET) |
92cf2118 | 193 | |
c3bc8fd6 | 194 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE) |
bdb43806 PZ |
195 | extern void preempt_count_add(int val); |
196 | extern void preempt_count_sub(int val); | |
fe32d3cd KK |
197 | #define preempt_count_dec_and_test() \ |
198 | ({ preempt_count_sub(1); should_resched(0); }) | |
1da177e4 | 199 | #else |
bdb43806 PZ |
200 | #define preempt_count_add(val) __preempt_count_add(val) |
201 | #define preempt_count_sub(val) __preempt_count_sub(val) | |
202 | #define preempt_count_dec_and_test() __preempt_count_dec_and_test() | |
1da177e4 LT |
203 | #endif |
204 | ||
bdb43806 PZ |
205 | #define __preempt_count_inc() __preempt_count_add(1) |
206 | #define __preempt_count_dec() __preempt_count_sub(1) | |
bdd4e85d | 207 | |
bdb43806 PZ |
208 | #define preempt_count_inc() preempt_count_add(1) |
209 | #define preempt_count_dec() preempt_count_sub(1) | |
bdd4e85d FW |
210 | |
211 | #ifdef CONFIG_PREEMPT_COUNT | |
212 | ||
1da177e4 LT |
213 | #define preempt_disable() \ |
214 | do { \ | |
bdb43806 | 215 | preempt_count_inc(); \ |
1da177e4 LT |
216 | barrier(); \ |
217 | } while (0) | |
218 | ||
ba74c144 | 219 | #define sched_preempt_enable_no_resched() \ |
1da177e4 LT |
220 | do { \ |
221 | barrier(); \ | |
bdb43806 | 222 | preempt_count_dec(); \ |
1da177e4 LT |
223 | } while (0) |
224 | ||
bdb43806 | 225 | #define preempt_enable_no_resched() sched_preempt_enable_no_resched() |
ba74c144 | 226 | |
2e10e71c FW |
227 | #define preemptible() (preempt_count() == 0 && !irqs_disabled()) |
228 | ||
c1a280b6 | 229 | #ifdef CONFIG_PREEMPTION |
1da177e4 LT |
230 | #define preempt_enable() \ |
231 | do { \ | |
bdb43806 PZ |
232 | barrier(); \ |
233 | if (unlikely(preempt_count_dec_and_test())) \ | |
1a338ac3 | 234 | __preempt_schedule(); \ |
1da177e4 LT |
235 | } while (0) |
236 | ||
9a92e3dc FW |
237 | #define preempt_enable_notrace() \ |
238 | do { \ | |
239 | barrier(); \ | |
240 | if (unlikely(__preempt_count_dec_and_test())) \ | |
241 | __preempt_schedule_notrace(); \ | |
242 | } while (0) | |
243 | ||
bdb43806 PZ |
244 | #define preempt_check_resched() \ |
245 | do { \ | |
fe32d3cd | 246 | if (should_resched(0)) \ |
1a338ac3 | 247 | __preempt_schedule(); \ |
bdb43806 PZ |
248 | } while (0) |
249 | ||
c1a280b6 | 250 | #else /* !CONFIG_PREEMPTION */ |
62b94a08 PZ |
251 | #define preempt_enable() \ |
252 | do { \ | |
253 | barrier(); \ | |
254 | preempt_count_dec(); \ | |
255 | } while (0) | |
50282528 | 256 | |
9a92e3dc | 257 | #define preempt_enable_notrace() \ |
50282528 SR |
258 | do { \ |
259 | barrier(); \ | |
bdb43806 | 260 | __preempt_count_dec(); \ |
50282528 SR |
261 | } while (0) |
262 | ||
9a92e3dc | 263 | #define preempt_check_resched() do { } while (0) |
c1a280b6 | 264 | #endif /* CONFIG_PREEMPTION */ |
bdb43806 | 265 | |
9a92e3dc | 266 | #define preempt_disable_notrace() \ |
50282528 | 267 | do { \ |
9a92e3dc | 268 | __preempt_count_inc(); \ |
bdb43806 | 269 | barrier(); \ |
50282528 | 270 | } while (0) |
9a92e3dc FW |
271 | |
272 | #define preempt_enable_no_resched_notrace() \ | |
62b94a08 PZ |
273 | do { \ |
274 | barrier(); \ | |
275 | __preempt_count_dec(); \ | |
276 | } while (0) | |
50282528 | 277 | |
bdd4e85d | 278 | #else /* !CONFIG_PREEMPT_COUNT */ |
1da177e4 | 279 | |
386afc91 LT |
280 | /* |
281 | * Even if we don't have any preemption, we need preempt disable/enable | |
282 | * to be barriers, so that we don't have things like get_user/put_user | |
283 | * that can cause faults and scheduling migrate into our preempt-protected | |
284 | * region. | |
285 | */ | |
bdb43806 | 286 | #define preempt_disable() barrier() |
386afc91 | 287 | #define sched_preempt_enable_no_resched() barrier() |
bdb43806 PZ |
288 | #define preempt_enable_no_resched() barrier() |
289 | #define preempt_enable() barrier() | |
290 | #define preempt_check_resched() do { } while (0) | |
386afc91 LT |
291 | |
292 | #define preempt_disable_notrace() barrier() | |
293 | #define preempt_enable_no_resched_notrace() barrier() | |
294 | #define preempt_enable_notrace() barrier() | |
2e10e71c | 295 | #define preemptible() 0 |
50282528 | 296 | |
bdd4e85d | 297 | #endif /* CONFIG_PREEMPT_COUNT */ |
1da177e4 | 298 | |
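As a brief, hedged usage sketch of the primitives defined above: a short per-CPU update is the classic reason to disable preemption. The per-CPU variable and function name are hypothetical.

```c
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_events);	/* hypothetical counter */

static void demo_count_event(void)
{
	preempt_disable();		/* stay on this CPU for the update */
	__this_cpu_inc(demo_events);
	preempt_enable();		/* may reschedule if a resched became pending */
}
```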
62b94a08 PZ |
299 | #ifdef MODULE |
300 | /* | |
301 | * Modules have no business playing preemption tricks. | |
302 | */ | |
303 | #undef sched_preempt_enable_no_resched | |
304 | #undef preempt_enable_no_resched | |
305 | #undef preempt_enable_no_resched_notrace | |
306 | #undef preempt_check_resched | |
307 | #endif | |
308 | ||
8cb75e0c PZ |
309 | #define preempt_set_need_resched() \ |
310 | do { \ | |
311 | set_preempt_need_resched(); \ | |
312 | } while (0) | |
313 | #define preempt_fold_need_resched() \ | |
314 | do { \ | |
315 | if (tif_need_resched()) \ | |
316 | set_preempt_need_resched(); \ | |
317 | } while (0) | |
8cb75e0c | 318 | |
e107be36 AK |
319 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
320 | ||
321 | struct preempt_notifier; | |
322 | ||
323 | /** | |
324 | * preempt_ops - notifiers called when a task is preempted and rescheduled | |
325 | * @sched_in: we're about to be rescheduled: | |
326 | * notifier: struct preempt_notifier for the task being scheduled | |
327 | * cpu: cpu we're scheduled on | |
328 | * @sched_out: we've just been preempted | |
329 | * notifier: struct preempt_notifier for the task being preempted | |
330 | * next: the task that's kicking us out | |
8592e648 TH |
331 | * |
332 | * Please note that sched_in and out are called under different | |
333 | * contexts. sched_out is called with rq lock held and irq disabled | |
334 | * while sched_in is called without rq lock and irq enabled. This | |
335 | * difference is intentional and depended upon by its users. | |
e107be36 AK |
336 | */ |
337 | struct preempt_ops { | |
338 | void (*sched_in)(struct preempt_notifier *notifier, int cpu); | |
339 | void (*sched_out)(struct preempt_notifier *notifier, | |
340 | struct task_struct *next); | |
341 | }; | |
342 | ||
343 | /** | |
344 | * preempt_notifier - key for installing preemption notifiers | |
345 | * @link: internal use | |
346 | * @ops: defines the notifier functions to be called | |
347 | * | |
348 | * Usually used in conjunction with container_of(). | |
349 | */ | |
350 | struct preempt_notifier { | |
351 | struct hlist_node link; | |
352 | struct preempt_ops *ops; | |
353 | }; | |
354 | ||
2ecd9d29 PZ |
355 | void preempt_notifier_inc(void); |
356 | void preempt_notifier_dec(void); | |
e107be36 AK |
357 | void preempt_notifier_register(struct preempt_notifier *notifier); |
358 | void preempt_notifier_unregister(struct preempt_notifier *notifier); | |
359 | ||
360 | static inline void preempt_notifier_init(struct preempt_notifier *notifier, | |
361 | struct preempt_ops *ops) | |
362 | { | |
2b010a69 KO |
363 | /* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */ |
364 | notifier->link.next = NULL; | |
365 | notifier->link.pprev = NULL; | |
e107be36 AK |
366 | notifier->ops = ops; |
367 | } | |
368 | ||
369 | #endif | |
370 | ||
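A hedged sketch of the container_of() pattern mentioned in the kerneldoc above; the structure and callbacks are hypothetical, only the preempt_notifier API itself comes from this header.

```c
#include <linux/kernel.h>
#include <linux/preempt.h>

struct demo_ctx {				/* hypothetical user structure */
	struct preempt_notifier notifier;
	int last_cpu;
};

static void demo_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct demo_ctx *ctx = container_of(pn, struct demo_ctx, notifier);

	ctx->last_cpu = cpu;			/* we are being scheduled back in */
}

static void demo_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	/* called with the rq lock held and IRQs disabled: keep it short */
}

static struct preempt_ops demo_ops = {
	.sched_in	= demo_sched_in,
	.sched_out	= demo_sched_out,
};

static void demo_ctx_attach(struct demo_ctx *ctx)
{
	preempt_notifier_inc();				/* enable the notifier static key */
	preempt_notifier_init(&ctx->notifier, &demo_ops);
	preempt_notifier_register(&ctx->notifier);	/* hooks the current task */
}
```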
74d862b6 | 371 | #ifdef CONFIG_SMP |
af449901 PZ |
372 | |
373 | /* | |
a7c81556 | 374 | * Migrate-Disable and why it is undesired. |
af449901 | 375 | * |
a7c81556 PZ |
376 | * When a preempted task becomes eligible to run under the ideal model (IOW it |
377 | * becomes one of the M highest priority tasks), it might still have to wait | |
378 | * for the preemptee's migrate_disable() section to complete. Thereby suffering | |
379 | * a reduction in bandwidth in the exact duration of the migrate_disable() | |
380 | * section. | |
af449901 | 381 | * |
a7c81556 PZ |
382 | * Per this argument, the change from preempt_disable() to migrate_disable() |
383 | * gets us: | |
af449901 | 384 | * |
a7c81556 PZ |
385 | * - a higher priority task gains reduced wake-up latency; with preempt_disable() |
386 | * it would have had to wait for the lower priority task. | |
387 | * | |
388 | * - a lower priority task, which under preempt_disable() could've instantly | |
389 | * migrated away when another CPU becomes available, is now constrained | |
390 | * by the ability to push the higher priority task away, which might itself be | |
391 | * in a migrate_disable() section, reducing its available bandwidth. | |
392 | * | |
393 | * IOW it trades latency / moves the interference term, but it stays in the | |
394 | * system, and as long as it remains unbounded, the system is not fully | |
395 | * deterministic. | |
af449901 PZ |
396 | * |
397 | * | |
398 | * The reason we have it anyway. | |
399 | * | |
400 | * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a | |
401 | * number of primitives into becoming preemptible, they would also allow | |
402 | * migration. This turns out to break a bunch of per-cpu usage. To this end, | |
403 | * all these primitives employ migrate_disable() to restore this implicit | |
404 | * assumption. | |
405 | * | |
406 | * This is a 'temporary' work-around at best. The correct solution is getting | |
407 | * rid of the above assumptions and reworking the code to employ explicit | |
408 | * per-cpu locking or short preempt-disable regions. | |
409 | * | |
410 | * The end goal must be to get rid of migrate_disable(), alternatively we need | |
411 | * a schedulability theory that does not depend on arbitrary migration. | |
412 | * | |
413 | * | |
414 | * Notes on the implementation. | |
415 | * | |
416 | * The implementation is particularly tricky since existing code patterns | |
417 | * dictate neither migrate_disable() nor migrate_enable() is allowed to block. | |
418 | * This means that it cannot use cpus_read_lock() to serialize against hotplug, | |
419 | * nor can it easily migrate itself into a pending affinity mask change on | |
420 | * migrate_enable(). | |
421 | * | |
422 | * | |
423 | * Note: even non-work-conserving schedulers like semi-partitioned depend on | |
424 | * migration, so migrate_disable() is not only a problem for | |
425 | * work-conserving schedulers. | |
426 | * | |
427 | */ | |
428 | extern void migrate_disable(void); | |
429 | extern void migrate_enable(void); | |
430 | ||
74d862b6 | 431 | #else |
af449901 PZ |
432 | |
433 | static inline void migrate_disable(void) { } | |
434 | static inline void migrate_enable(void) { } | |
435 | ||
74d862b6 | 436 | #endif /* CONFIG_SMP */ |
af449901 | 437 | |
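A hedged sketch of what migrate_disable() buys over preempt_disable(): the CPU stays stable for the section while the task remains preemptible (and may take sleeping locks on PREEMPT_RT). The function name is hypothetical.

```c
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void demo_cpu_stable_section(void)
{
	migrate_disable();	/* no migration, but preemption is still allowed */
	/* smp_processor_id() is stable here even though we may be preempted */
	pr_debug("running on CPU %d\n", smp_processor_id());
	migrate_enable();
}
```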
555bb4cc TG |
438 | /** |
439 | * preempt_disable_nested - Disable preemption inside a normally preempt disabled section | |
440 | * | |
441 | * Use for code which requires preemption protection inside a critical | |
442 | * section which has preemption disabled implicitly on non-PREEMPT_RT | |
443 | * enabled kernels, by e.g.: | |
444 | * - holding a spinlock/rwlock | |
445 | * - soft interrupt context | |
446 | * - regular interrupt handlers | |
447 | * | |
448 | * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft | |
449 | * interrupt context and regular interrupt handlers are preemptible and | |
450 | * only prevent migration. preempt_disable_nested() ensures that preemption | |
451 | * is disabled for cases which require CPU local serialization even on | |
452 | * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP. | |
453 | * | |
454 | * The use cases are code sequences which are not serialized by a | |
455 | * particular lock instance, e.g.: | |
456 | * - seqcount write side critical sections where the seqcount is not | |
457 | * associated to a particular lock and therefore the automatic | |
458 | * protection mechanism does not work. This prevents a live lock | |
459 | * against a preempting high priority reader. | |
460 | * - RMW per CPU variable updates like vmstat. | |
461 | */ | |
462 | /* Macro to avoid header recursion hell vs. lockdep */ | |
463 | #define preempt_disable_nested() \ | |
464 | do { \ | |
465 | if (IS_ENABLED(CONFIG_PREEMPT_RT)) \ | |
466 | preempt_disable(); \ | |
467 | else \ | |
468 | lockdep_assert_preemption_disabled(); \ | |
469 | } while (0) | |
470 | ||
471 | /** | |
472 | * preempt_enable_nested - Undo the effect of preempt_disable_nested() | |
473 | */ | |
474 | static __always_inline void preempt_enable_nested(void) | |
475 | { | |
476 | if (IS_ENABLED(CONFIG_PREEMPT_RT)) | |
477 | preempt_enable(); | |
478 | } | |
479 | ||
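A hedged sketch of the vmstat-like case named above: a per-CPU RMW update done under a spinlock, which on !PREEMPT_RT already runs with preemption disabled but on PREEMPT_RT does not. The lock and variable are hypothetical.

```c
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_stat_lock);		/* hypothetical lock */
static DEFINE_PER_CPU(u64, demo_stat);		/* hypothetical per-CPU statistic */

static void demo_stat_add(u64 n)
{
	spin_lock(&demo_stat_lock);	/* disables preemption only on !PREEMPT_RT */
	preempt_disable_nested();	/* real preempt_disable() only on PREEMPT_RT */
	__this_cpu_add(demo_stat, n);	/* CPU-local RMW needs preemption off */
	preempt_enable_nested();
	spin_unlock(&demo_stat_lock);
}
```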
54da6a09 PZ |
480 | DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable()) |
481 | DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace()) | |
482 | DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable()) | |
483 | ||
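The guards above plug into the scope-based helpers from linux/cleanup.h (included at the top of this file); a minimal usage sketch follows, with a hypothetical per-CPU variable.

```c
#include <linux/cleanup.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_guarded);	/* hypothetical */

static void demo_guarded_inc(void)
{
	guard(preempt)();		/* preempt_disable() from here on */
	__this_cpu_inc(demo_guarded);
}					/* preempt_enable() runs when the scope ends */
```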
f0dc887f SC |
484 | #ifdef CONFIG_PREEMPT_DYNAMIC |
485 | ||
486 | extern bool preempt_model_none(void); | |
487 | extern bool preempt_model_voluntary(void); | |
488 | extern bool preempt_model_full(void); | |
489 | ||
490 | #else | |
491 | ||
492 | static inline bool preempt_model_none(void) | |
493 | { | |
494 | return IS_ENABLED(CONFIG_PREEMPT_NONE); | |
495 | } | |
496 | static inline bool preempt_model_voluntary(void) | |
497 | { | |
498 | return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY); | |
499 | } | |
500 | static inline bool preempt_model_full(void) | |
501 | { | |
502 | return IS_ENABLED(CONFIG_PREEMPT); | |
503 | } | |
504 | ||
505 | #endif | |
506 | ||
507 | static inline bool preempt_model_rt(void) | |
508 | { | |
509 | return IS_ENABLED(CONFIG_PREEMPT_RT); | |
510 | } | |
511 | ||
512 | /* | |
513 | * Does the preemption model allow non-cooperative preemption? | |
514 | * | |
515 | * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with | |
516 | * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the | |
517 | * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the | |
518 | * PREEMPT_NONE model. | |
519 | */ | |
520 | static inline bool preempt_model_preemptible(void) | |
521 | { | |
522 | return preempt_model_full() || preempt_model_rt(); | |
523 | } | |
524 | ||
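A hedged example of how the model helpers tend to be used: inserting an explicit scheduling point only when the running model cannot preempt the kernel on its own. The function name is hypothetical.

```c
#include <linux/sched.h>

/* Hypothetical helper: add a resched point only on non-preemptible models. */
static void demo_resched_point(void)
{
	if (!preempt_model_preemptible())
		cond_resched();
}
```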
1da177e4 | 525 | #endif /* __LINUX_PREEMPT_H */ |