/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * A 'generic' spinlock implementation that is based on MCS locks. For an
 * architecture that's looking for a 'generic' spinlock, please first consider
 * ticket-lock.h and only come looking here when you've considered all the
 * constraints below and can show your hardware does actually perform better
 * with qspinlock.
 *
 * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
 * weaker than RCtso if you're Power), where regular code only expects atomic_t
 * to be RCpc.
 *
 * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
 * of atomic operations to behave well together; please audit them carefully to
 * ensure they all have forward progress. Many atomic operations may default to
 * cmpxchg() loops which will not have good forward progress properties on
 * LL/SC architectures.
 *
 * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
 * do. Carefully read the patches that introduced
 * queued_fetch_set_pending_acquire().
 *
 * qspinlock also heavily relies on mixed-size atomic operations; specifically,
 * it requires architectures to have xchg16, something which many LL/SC
 * architectures need to implement as a 32-bit and+or in order to satisfy the
 * forward progress guarantees mentioned above (see the illustrative sketch
 * following the includes below).
 *
 * Further reading on mixed-size atomics that might be relevant:
 *
 *   http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

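/*
 * Illustrative only (not part of this header): the "32-bit and+or"
 * emulation of xchg16 mentioned above could, on an LL/SC architecture,
 * look roughly like the sketch below, where ll()/sc() are hypothetical
 * stand-ins for that architecture's load-linked/store-conditional
 * primitives, @mask selects the 16-bit halfword being exchanged and
 * @newval is the replacement value already shifted into that halfword's
 * position:
 *
 *	u32 old, new;
 *
 *	do {
 *		old = ll(word);				// 32-bit load-linked
 *		new = (old & ~mask) | newval;		// and + or
 *	} while (!sc(word, new));			// store-conditional
 *
 * The point is that the whole read-modify-write is a single hardware
 * LL/SC sequence, so the architecture's forward progress guarantees
 * apply; composing the same thing out of a generic cmpxchg() loop does
 * not provide an equivalent guarantee.
 */
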
#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked with respect to the lockref code, to prevent the lockref
 *      code from stealing the lock and changing things underneath it.
 *      This also allows some optimizations to be applied without
 *      conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	/* Skip the more expensive cmpxchg() when the lock is observably taken. */
	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	/* Fast path: the lock looks free, try to take it with one acquire cmpxchg. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/*
	 * Contended: @val now holds the lock word we observed; queue up
	 * behind the current owner/waiters in the slowpath.
	 */
	queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
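
/*
 * Illustrative note (not part of the interface): the byte store above
 * is sufficient because the byte-sized @locked field overlays the
 * _Q_LOCKED_MASK bits of @val (see asm-generic/qspinlock_types.h) and
 * the lock holder owns exactly _Q_LOCKED_VAL in those bits.  A
 * functionally equivalent, typically more expensive, formulation that
 * avoids the mixed-size store would be an atomic subtraction with
 * release ordering, roughly:
 *
 *	(void)atomic_fetch_sub_release(_Q_LOCKED_VAL, &lock->val);
 *
 * This is only a sketch of the alternative, not what this header uses.
 */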

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
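
/*
 * Illustrative only (hypothetical, not part of this header): an
 * architecture that can run as a hypervisor guest may provide its own
 * virt_spin_lock() (together with "#define virt_spin_lock") to bypass
 * fair queueing there, since lock holder preemption makes strictly
 * fair locks perform poorly under virtualization.  A minimal sketch of
 * such an override, assuming a simple test-and-set fallback and a
 * hypothetical in_virtualized_guest() predicate:
 *
 *	static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		if (!in_virtualized_guest())	// hypothetical predicate
 *			return false;
 *
 *		do {
 *			while (atomic_read(&lock->val) != 0)
 *				cpu_relax();
 *		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *
 *		return true;
 *	}
 */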

/*
 * Remap the architecture-specific spinlock functions to the
 * corresponding queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
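
/*
 * Illustrative only (hypothetical sketch, not part of this header): an
 * architecture opting in to qspinlock typically selects
 * ARCH_USE_QUEUED_SPINLOCKS and provides an asm/qspinlock.h that
 * supplies any overrides before pulling in this file, so that the
 * arch_spin_*() mappings above become its spinlock implementation.
 * Details vary per architecture; roughly:
 *
 *	// arch/<arch>/include/asm/qspinlock.h
 *	#ifndef _ASM_QSPINLOCK_H
 *	#define _ASM_QSPINLOCK_H
 *
 *	// Optional overrides such as queued_spin_unlock() go here, each
 *	// paired with a matching "#define queued_spin_unlock ..." so the
 *	// generic fallback above is compiled out.
 *
 *	#include <asm-generic/qspinlock.h>
 *
 *	#endif	// _ASM_QSPINLOCK_H
 */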

#endif /* __ASM_GENERIC_QSPINLOCK_H */