/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description for this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
 * existing API, we must modify it in the ways described below.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion within its own context, and there is a limit
 * to the contexts that can nest: task, softirq, hardirq and nmi. As there
 * are at most 4 nesting levels, the nesting level can be encoded in a 2-bit
 * number, and we can encode the tail by combining this 2-bit nesting level
 * with the cpu number. With one byte for the lock value and 3 bytes for the
 * tail, only a 32-bit word is needed. Even though we only need 1 bit for the
 * lock, we extend it to a full byte to achieve better performance on
 * architectures that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 * atomic operations on smaller 8-bit and 16-bit data types.
 */
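
/*
 * A rough sketch of the resulting 32-bit word layout; the authoritative
 * definitions live in qspinlock_types.h, and this sketch assumes the common
 * configuration where NR_CPUS < 16K (so _Q_PENDING_BITS == 8):
 *
 *	bits  0- 7: locked byte
 *	bits  8-15: pending
 *	bits 16-17: tail index (2-bit nesting level)
 *	bits 18-31: tail cpu (+ 1)
 */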

#include "mcs_spinlock.h"

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define MAX_NODES	8
#else
#define MAX_NODES	4
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline u32 encode_tail(int cpu, int idx)
{
	u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(idx > 3);
#endif
	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
	tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

	return tail;
}

static inline struct mcs_spinlock *decode_tail(u32 tail)
{
	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
	int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

	return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
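
/*
 * Worked example of the encoding above: encode_tail(cpu = 2, idx = 1)
 * yields ((2 + 1) << _Q_TAIL_CPU_OFFSET) | (1 << _Q_TAIL_IDX_OFFSET), and
 * decode_tail() of that value recovers &mcs_nodes[1] on CPU 2. The +1/-1
 * on the cpu number keeps an all-zero tail meaning "no queue".
 */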

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

/*
 * By using the whole 2nd least significant byte for the pending bit, we
 * can allow better optimization of the lock acquisition for the pending
 * bit holder.
 *
 * This internal structure is also used by the set_locked function which
 * is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
	union {
		atomic_t val;
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
};
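
/*
 * Illustration of the overlay (little-endian case): locked_pending covers
 * the {locked, pending} byte pair, so a single 16-bit store of
 * _Q_LOCKED_VAL (0x0001) sets the locked byte and clears the pending byte
 * at once. This is what clear_pending_set_locked() below relies on.
 */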

#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Use release semantics to make sure that the MCS node is properly
	 * initialized before changing the tail code.
	 */
	return (u32)xchg_release(&l->tail,
				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
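
/*
 * Note on the variant above: the 16-bit xchg_release() operates on the tail
 * halfword only, so the locked and pending bytes are never disturbed and no
 * retry loop is needed, unlike the cmpxchg-based fallback below for
 * _Q_PENDING_BITS != 8.
 */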

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}
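
/*
 * Worked arithmetic for the atomic_add() above, assuming the single-bit
 * pending layout where _Q_PENDING_VAL == (1 << 8) and _Q_LOCKED_VAL == 1:
 * starting from *,1,0, adding -(1 << 8) + 1 clears the pending bit and sets
 * the locked byte in one atomic RMW, giving *,0,1.
 */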

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new, val = atomic_read(&lock->val);

	for (;;) {
		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * Use release semantics to make sure that the MCS node is
		 * properly initialized before changing the tail code.
		 */
		old = atomic_cmpxchg_release(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}
	return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}


/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }
static __always_inline void __pv_wait_head(struct qspinlock *lock,
					   struct mcs_spinlock *node) { }

#define pv_enabled()		false

#define pv_init_node		__pv_init_node
#define pv_wait_node		__pv_wait_node
#define pv_kick_node		__pv_kick_node
#define pv_wait_head		__pv_wait_head

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */
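
/*
 * How this file is compiled, for the reader: the first pass (everything
 * above, guarded by _GEN_PV_LOCK_SLOWPATH) emits the native slowpath with
 * the PV callbacks stubbed out. When CONFIG_PARAVIRT_SPINLOCKS is set, the
 * bottom of this file re-#includes qspinlock.c with the pv_*() hooks
 * redefined, emitting a second, paravirt-aware copy of the slowpath named
 * __pv_queued_spin_lock_slowpath().
 */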

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	struct mcs_spinlock *prev, *next, *node;
	u32 new, old, tail;
	int idx;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	if (pv_enabled())
		goto queue;

	if (virt_spin_lock(lock))
		return;

	/*
	 * wait for in-progress pending->locked hand-overs
	 *
	 * 0,1,0 -> 0,0,1
	 */
	if (val == _Q_PENDING_VAL) {
		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
			cpu_relax();
	}

	/*
	 * trylock || pending
	 *
	 * 0,0,0 -> 0,0,1 ; trylock
	 * 0,0,1 -> 0,1,1 ; pending
	 */
	for (;;) {
		/*
		 * If we observe any contention; queue.
		 */
		if (val & ~_Q_LOCKED_MASK)
			goto queue;

		new = _Q_LOCKED_VAL;
		if (val == new)
			new |= _Q_PENDING_VAL;

		/*
		 * Acquire semantics are required here as the function may
		 * return immediately if the lock was free.
		 */
		old = atomic_cmpxchg_acquire(&lock->val, val, new);
		if (old == val)
			break;

		val = old;
	}

	/*
	 * we won the trylock
	 */
	if (new == _Q_LOCKED_VAL)
		return;

	/*
	 * we're pending, wait for the owner to go away.
	 *
	 * *,1,1 -> *,1,0
	 *
	 * this wait loop must be a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because not all clear_pending_set_locked()
	 * implementations imply full barriers.
	 */
	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
		cpu_relax();

	/*
	 * take ownership and clear the pending bit.
	 *
	 * *,1,0 -> *,0,1
	 */
	clear_pending_set_locked(lock);
	return;

	/*
	 * End of pending bit optimistic spinning and beginning of MCS
	 * queuing.
	 */
queue:
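	/*
	 * Grab this CPU's array of queue nodes and claim the next free
	 * nesting slot; mcs_nodes[0].count tracks how many slots are in use
	 * on this CPU, and (cpu, idx) together form our tail code.
	 */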
	node = this_cpu_ptr(&mcs_nodes[0]);
	idx = node->count++;
	tail = encode_tail(smp_processor_id(), idx);

	node += idx;
	node->locked = 0;
	node->next = NULL;
	pv_init_node(node);

	/*
	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
	 * attempt the trylock once more in the hope someone let go while we
	 * weren't watching.
	 */
	if (queued_spin_trylock(lock))
		goto release;

	/*
	 * We have already touched the queueing cacheline; don't bother with
	 * pending stuff.
	 *
	 * p,*,* -> n,*,*
	 */
	old = xchg_tail(lock, tail);
	next = NULL;

	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_MASK) {
		prev = decode_tail(old);
		WRITE_ONCE(prev->next, node);

		pv_wait_node(node);
		arch_mcs_spin_lock_contended(&node->locked);

		/*
		 * While waiting for the MCS lock, the next pointer may have
		 * been set by another lock waiter. We optimistically load
		 * the next pointer & prefetch the cacheline for writing
		 * to reduce latency in the upcoming MCS unlock operation.
		 */
		next = READ_ONCE(node->next);
		if (next)
			prefetchw(next);
	}

	/*
	 * we're at the head of the waitqueue, wait for the owner & pending to
	 * go away.
	 *
	 * *,x,y -> *,0,0
	 *
	 * this wait loop must use a load-acquire such that we match the
	 * store-release that clears the locked bit and create lock
	 * sequentiality; this is because the set_locked() function below
	 * does not imply a full barrier.
	 */
	pv_wait_head(lock, node);
	smp_cond_acquire(!((val = atomic_read(&lock->val)) & _Q_LOCKED_PENDING_MASK));

	/*
	 * claim the lock:
	 *
	 * n,0,0 -> 0,0,1 : lock, uncontended
	 * *,0,0 -> *,0,1 : lock, contended
	 *
	 * If the queue head is the only one in the queue (lock value == tail),
	 * clear the tail code and grab the lock. Otherwise, we only need
	 * to grab the lock.
	 */
	for (;;) {
		if (val != tail) {
			set_locked(lock);
			break;
		}
		/*
		 * The smp_cond_acquire() call above has provided the
		 * necessary acquire semantics required for locking. At most
		 * two iterations of this loop may be run.
		 */
		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
		if (old == val)
			goto release;	/* No contention */

		val = old;
	}

	/*
	 * contended path; wait for next if not observed yet, release.
	 */
	if (!next) {
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	arch_mcs_spin_unlock_contended(&next->locked);
	pv_kick_node(lock, next);

release:
	/*
	 * release the node
	 */
	this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
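
/*
 * For context, a rough sketch (not verbatim; the real definition lives in
 * the generic qspinlock.h header) of the fastpath that leads here:
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
 *
 *		if (likely(val == 0))
 *			return;
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 *
 * i.e. the slowpath only runs after the uncontended cmpxchg fails, and @val
 * carries the lock word observed at that point.
 */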

/*
 * Generate the paravirt code for queued_spin_lock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef  pv_enabled
#define pv_enabled()	true

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head

#undef  queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	__pv_queued_spin_lock_slowpath

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

#endif