a7878709 PZ
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
}
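
/*
 * Example of the masking (PREEMPT_NEED_RESCHED's position is arch-chosen;
 * bit 31 is assumed here purely for illustration): a task inside one
 * preempt_disable() with a reschedule pending has a raw count of 0x00000001
 * -- the *cleared* bit means "need resched", see the inversion note below --
 * while the same task with no reschedule pending has 0x80000001.  Both read
 * as 1 here, so callers keep their "non-zero means no preemption" rule.
 */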

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}
19 | ||
20 | /* | |
21 | * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the | |
22 | * alternative is loosing a reschedule. Better schedule too often -- also this | |
23 | * should be a very rare operation. | |
24 | */ | |
25 | static __always_inline void preempt_count_set(int pc) | |
26 | { | |
27 | *preempt_count_ptr() = pc; | |
28 | } | |
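
/*
 * Illustration of the trade-off above (same assumed bit 31 as elsewhere in
 * these notes): with a raw count of 0x00000002 -- two disables, resched
 * pending -- preempt_count_set(2) happens to preserve everything, but with
 * a raw count of 0x80000002 the plain store clears the folded bit, so the
 * task looks like it needs a reschedule when it does not.  That is the safe
 * direction: we may schedule once too often, never too rarely.
 */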

01028747 PZ
/*
 * must be macros to avoid header recursion hell
 */
#define task_preempt_count(p) \
	(task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED)

#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
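
/*
 * Usage sketch (hedged; the fork-path code is paraphrased from memory, not
 * quoted): sched_fork() would seed a child so it starts preempt-disabled,
 * while init_idle() seeds the idle task with preemption enabled:
 *
 *	init_task_preempt_count(p);
 *	...
 *	init_idle_preempt_count(idle, cpu);
 */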

a7878709 PZ
/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */

static __always_inline void set_preempt_need_resched(void)
{
	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}

static __always_inline void clear_preempt_need_resched(void)
{
	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
}
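
/*
 * Worked example of the inversion (bit 31 assumed, as on x86; purely
 * illustrative):
 *
 *	raw == 0x80000001		one preempt_disable(), no resched pending
 *	set_preempt_need_resched();	raw == 0x00000001
 *	__preempt_count_sub(1);		raw == 0
 *
 * A single decrement reaching zero therefore proves both halves at once:
 * the bit is clear (we need to resched) and the count is zero (we may).
 */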

bdb43806 PZ
/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	return !--*preempt_count_ptr();
}
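
/*
 * Sketch of the intended caller (the real preempt_enable() lives in
 * <linux/preempt.h>; reproduced from memory, so treat as an approximation):
 *
 *	#define preempt_enable() do { \
 *		barrier(); \
 *		if (unlikely(preempt_count_dec_and_test())) \
 *			__preempt_schedule(); \
 *	} while (0)
 *
 * Thanks to the folded, inverted bit, an arch like x86 can turn the
 * dec-and-test into one decrement plus a conditional jump on zero.
 */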

bdb43806 PZ
/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(void)
{
	return unlikely(!*preempt_count_ptr());
}
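
/*
 * Likely caller, sketched for illustration (cond_resched()'s backend in
 * kernel/sched/core.c looks roughly like this; treat as an approximation):
 *
 *	int __sched _cond_resched(void)
 *	{
 *		if (should_resched()) {
 *			__cond_resched();
 *			return 1;
 *		}
 *		return 0;
 *	}
 */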

1a338ac3 PZ
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()

#ifdef CONFIG_CONTEXT_TRACKING
extern asmlinkage void preempt_schedule_context(void);
#define __preempt_schedule_context() preempt_schedule_context()
#endif
#endif /* CONFIG_PREEMPT */

a7878709 PZ
#endif /* __ASM_PREEMPT_H */