/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the
 * MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache-line bouncing that common test-and-set
 * spin-lock implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H
#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};
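
/*
 * Illustrative note (an assumption mirroring qspinlock.c, not text from
 * this file): users such as qspinlock keep a small per-cpu array of
 * nodes, one slot per nesting context (task, softirq, hardirq, NMI):
 *
 *	static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, nodes[4]);
 *
 * with @count in the first slot tracking how many of the slots are in
 * use on the local cpu.
 */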

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_cond_load_acquire() provides the acquire semantics
 * required so that subsequent operations happen after the
 * lock is acquired. Additionally, some architectures such as
 * ARM64 would like to do spin-waiting instead of purely
 * spinning, and smp_cond_load_acquire() provides that behavior.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	smp_cond_load_acquire(l, VAL);					\
} while (0)
#endif
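
/*
 * Illustrative sketch (an assumption modeled on the generic fallback in
 * include/asm-generic/barrier.h, not text from this file): absent an
 * architecture override, smp_cond_load_acquire(l, VAL) behaves roughly
 * like a relaxed polling loop followed by an acquire barrier:
 *
 *	for (;;) {
 *		VAL = READ_ONCE(*l);
 *		if (VAL)
 *			break;
 *		cpu_relax();
 *	}
 *	smp_acquire__after_ctrl_dep();
 *
 * ARM64, by contrast, can wait for an event rather than burn cycles in
 * the loop, which is why the hook is overridable per architecture.
 */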

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif
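
/*
 * Illustrative sketch (an addition for this document): the release store
 * above pairs with the acquire load in arch_mcs_spin_lock_contended(),
 * giving message-passing ordering between the old and new lock holder:
 *
 *	old holder (unlock path)		new holder (contended path)
 *	------------------------		---------------------------
 *	stores to protected data
 *	smp_store_release(&next->locked, 1)
 *						smp_cond_load_acquire(&node->locked, VAL)
 *						loads of protected data
 *
 * Once the spinning cpu observes node->locked == 1, all of the previous
 * holder's critical-section stores are guaranteed visible to it.
 */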

/*
 * Note: an smp_load_acquire/smp_store_release pair is not sufficient to
 * form a full memory barrier across cpus on many architectures (x86 being
 * an exception) for an mcs_unlock/mcs_lock sequence. Applications that
 * need a full barrier across multiple cpus from an mcs_unlock/mcs_lock
 * pair should use smp_mb__after_unlock_lock() after mcs_lock.
 */
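
/*
 * Illustrative sketch (an addition for this document, not from the file):
 * a caller needing full-barrier semantics from an unlock/lock handover
 * would follow the acquisition with the upgrade barrier:
 *
 *	mcs_spin_lock(&lock, &node);
 *	smp_mb__after_unlock_lock();
 *
 * On architectures where unlock+lock does not already imply a full
 * barrier, this upgrades the ACQUIRE ordering of mcs_spin_lock() to a
 * full barrier against the preceding unlock.
 */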

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to that node to this function in addition to the lock.
 * If the lock has already been acquired, this will spin on node->locked
 * until the previous lock holder sets it in mcs_spin_unlock(). (A usage
 * sketch follows mcs_spin_unlock() below.)
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next = NULL;

	/*
	 * We rely on the full barrier with global transitivity implied by the
	 * below xchg() to order the initialization stores above against any
	 * observation of @node, and to provide the ACQUIRE ordering associated
	 * with a LOCK primitive.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * Each thread spins only on its own node->locked value for
		 * lock acquisition. However, since this thread can immediately
		 * acquire the lock and does not proceed to spin on its own
		 * node->locked, that value won't be used. If a debug mode is
		 * needed to audit lock status, set node->locked here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}
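
/*
 * Example usage: a minimal sketch added for this document (the helper name
 * is hypothetical, not part of the kernel). The caller supplies a queue
 * node, typically on its own stack; the node must remain valid until the
 * matching mcs_spin_unlock().
 */
static inline void mcs_spin_lock_example(struct mcs_spinlock **lock)
{
	struct mcs_spinlock node;	/* stack-local queue node */

	mcs_spin_lock(lock, &node);
	/* ... critical section protected by *lock ... */
	mcs_spin_unlock(lock, &node);
}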

#endif /* __LINUX_MCS_SPINLOCK_H */