/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/smp_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);


/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!_raw_spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}

void __lockfunc __release_kernel_lock(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	_raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
	int depth = current->lock_depth + 1;

	trace_lock_kernel(func, file, line);

	if (likely(!depth))
		__lock_kernel();
	current->lock_depth = depth;
}

void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();

	trace_unlock_kernel(func, file, line);
}

EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(_unlock_kernel);
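For context, callers do not invoke _lock_kernel()/_unlock_kernel() directly: the lock_kernel()/unlock_kernel() wrappers in <linux/smp_lock.h> pass the caller's __func__, __FILE__ and __LINE__ through so the tracepoints above can report the call site, and current->lock_depth makes the lock recursive per task. Below is a minimal sketch of how a legacy driver of this era might wrap an entry point in the BKL; the legacy_ioctl() function and the do_legacy_ioctl() helper are hypothetical and not part of this file.

#include <linux/fs.h>
#include <linux/smp_lock.h>

/* Hypothetical helper doing the actual work; may sleep. */
long do_legacy_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);

/*
 * Hypothetical legacy entry point. The BKL serializes it against all
 * other BKL users, and sleeping inside is tolerated because the
 * scheduler drops the lock over schedule() (__release_kernel_lock)
 * and takes it back on wakeup (__reacquire_kernel_lock).
 */
static long legacy_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	long ret;

	lock_kernel();	/* outermost call takes kernel_flag; nested calls only bump lock_depth */
	ret = do_legacy_ioctl(filp, cmd, arg);
	unlock_kernel();	/* spinlock released only when lock_depth drops back below 0 */

	return ret;
}

Because the nesting count lives in task_struct rather than in the lock itself, a task can re-enter lock_kernel() freely, which is exactly the semantics much of the pre-SMP code was written against.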